Example #1
def test_color_bars():

    sensor.reset()
    # Set sensor settings
    sensor.set_brightness(0)
    sensor.set_saturation(0)
    sensor.set_gainceiling(8)
    sensor.set_contrast(2)

    # Set sensor pixel format
    sensor.set_framesize(sensor.QVGA)
    sensor.set_pixformat(sensor.RGB565)

    # Enable colorbar test mode
    sensor.set_colorbar(True)

    # Skip a few frames to allow the sensor to settle down
    # Note: This takes more time when executed from the IDE.
    for i in range(0, 100):
        image = sensor.snapshot()

    # Color bars thresholds
    t = [lambda r, g, b: r < 50  and g < 50  and b < 50,   # Black
         lambda r, g, b: r < 50  and g < 50  and b > 200,  # Blue
         lambda r, g, b: r > 200 and g < 50  and b < 50,   # Red
         lambda r, g, b: r > 200 and g < 50  and b > 200,  # Purple
         lambda r, g, b: r < 50  and g > 200 and b < 50,   # Green
         lambda r, g, b: r < 50  and g > 200 and b > 200,  # Aqua
         lambda r, g, b: r > 200 and g > 200 and b < 50,   # Yellow
         lambda r, g, b: r > 200 and g > 200 and b > 200]  # White

    # 320x240 image with 8 color bars, each approx. 40 pixels wide.
    # We start from the center of the frame buffer and average the
    # values of 10 sample pixels from the center of each color bar.
    for i in range(0, 8):
        avg = (0, 0, 0)
        idx = 40*i+20 # center of colorbars
        for off in range(0, 10): # avg 10 pixels
            rgb = image.get_pixel(idx+off, 120)
            avg = tuple(map(sum, zip(avg, rgb)))

        if not t[i](avg[0]/10, avg[1]/10, avg[2]/10):
            raise Exception("COLOR BARS TEST FAILED. "
            "BAR#(%d): RGB(%d,%d,%d)"%(i+1, avg[0]/10, avg[1]/10, avg[2]/10))

    print("COLOR BARS TEST PASSED...")
Example #2
def unittest(data_path, temp_path):
    import sensor
    sensor.reset()
    sensor.set_framesize(sensor.QVGA)
    sensor.set_pixformat(sensor.GRAYSCALE)
    img = sensor.snapshot().clear()
    img.set_pixel(img.width()//2+50, 120, 255)
    img.set_pixel(img.width()//2-50, 120, 255)
    img.draw_line([img.width()//2-50, 50, img.width()//2+50, 50])
    img.draw_rectangle([img.width()//2-25, img.height()//2-25, 50, 50])
    img.draw_circle(img.width()//2, img.height()//2, 40)
    img.draw_string(11, 10, "HelloWorld!")
    img.draw_cross(img.width()//2, img.height()//2)
    sensor.flush()
    img.difference(data_path+"/drawing.pgm")
    stats = img.get_statistics()
    return (stats.max() == 0) and (stats.min() == 0)
Example #3
# Image Transfer - As The Remote Device
#
# This script is meant to talk to the "image_transfer_jpg_as_the_controller_device.py" on your computer.
#
# This script shows off how to transfer the frame buffer to your computer as a jpeg image.

import image, network, omv, rpc, sensor, struct

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.skip_frames(time=2000)

# Turn off the frame buffer connection to the IDE from the OpenMV Cam side.
#
# This needs to be done when manually compressing jpeg images at higher quality
# so that the OpenMV Cam does not try to stream them to the IDE using a fallback
# mechanism if the JPEG image is too large to fit in the IDE JPEG frame buffer on the OpenMV Cam.

omv.disable_fb(True)

# The RPC library above is installed on your OpenMV Cam and provides multiple classes for
# allowing your OpenMV Cam to be controlled over USB or LAN/WLAN.

################################################################
# Choose the interface you wish to control your OpenMV Cam over.
################################################################

# Uncomment the below line to set up your OpenMV Cam for control over a USB VCP.
#
interface = rpc.rpc_usb_vcp_slave()
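
# The rest of this example is cut off. A hedged sketch of how such a script usually
# continues, following the pattern of the OpenMV rpc examples (the callback below is
# illustrative, not the original's):
def get_frame_size(data):
    # Return the byte size of a fresh snapshot as a string.
    return str(sensor.snapshot().size()).encode()

interface.register_callback(get_frame_size)
interface.loop()  # serve remote procedure calls forever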
Example #4
# Object tracking with keypoints example.
# Show the camera an object and then run the script. A set of keypoints will be extracted
# once and then tracked in the following frames. If you want a new set of keypoints re-run
# the script. NOTE: see the docs for arguments to tune find_keypoints and match_keypoints.
import sensor, time, image

# Reset sensor
sensor.reset()

# Sensor settings
sensor.set_contrast(3)
sensor.set_gainceiling(16)
sensor.set_framesize(sensor.VGA)
sensor.set_windowing((320, 240))
sensor.set_pixformat(sensor.GRAYSCALE)

sensor.skip_frames(time = 2000)
sensor.set_auto_gain(False, value=100)

def draw_keypoints(img, kpts):
    print(kpts)
    img.draw_keypoints(kpts)
    img = sensor.snapshot()
    time.sleep(1000)

kpts1 = None
# NOTE: uncomment to load a keypoints descriptor from file
#kpts1 = image.load_descriptor("/desc.orb")
#img = sensor.snapshot()
#draw_keypoints(img, kpts1)
#
# This example shows how to use OpenMV's built-in line-drawing functions.
# It is only a rough test, but it makes a good reference.
# Set the IDE to non-JPEG mode to view the image at the best quality.

import sensor, image, time  # import the modules this script depends on
sensor.reset()  # initialize the camera
sensor.set_framesize(sensor.QVGA)  # set the frame size to QVGA (320x240)

# All drawing functions pass their color argument through the same code,
# so we only need to test one function.

while (True):

    # Test Draw Line (GRAYSCALE)
    sensor.set_pixformat(sensor.GRAYSCALE)  # set the pixel format to grayscale
    for i in range(10):  # loop i from 0 to 9, ten iterations in total
        img = sensor.snapshot()  # take a snapshot on each iteration
    for i in range(img.width()):  # img.width() is 320 (the QVGA width),
        # so this loops from 0 to 319
        c = ((i * 255) + (img.width() / 2)) / img.width()
        img.draw_line((i, 0, i, img.height() - 1), color=int(c))
        # img.draw_line((x0, y0, x1, y1), color=...) draws a straight line from
        # (x0, y0) to (x1, y1). For a grayscale image, color is a number from 0
        # to 255 (0 is black, 255 is white); for an RGB image, color is an
        # (r, g, b) tuple of red, green, and blue.
    sensor.snapshot()
    time.sleep(1000)

    # Test Draw Line (RGB565)
    sensor.set_pixformat(sensor.RGB565)  # set the pixel format to RGB565
    for i in range(10):
Example #6
import sensor, pyb, time

# Reset sensor
sensor.reset()

# Set sensor settings
sensor.set_brightness(0)
sensor.set_saturation(0)
sensor.set_gainceiling(16)
sensor.set_contrast(1)
sensor.set_framesize(sensor.QVGA)

# Enable JPEG and set quality
sensor.set_pixformat(sensor.JPEG)
sensor.set_quality(98)

# Red LED
led = pyb.LED(1)

# Skip a few frames to allow the sensor to settle down
# Note: This takes more time when executed from the IDE.
for i in range(0, 30):
    sensor.snapshot()

# Turn on red LED and wait for a second
led.on()
time.sleep(1000)

# Write JPEG image to file
with open("/test.jpeg", "w") as f:
    f.write(sensor.snapshot())
# centroid of the largest blob in each roi. The x position of the centroids
# will then be averaged with different weights where the most weight is assigned
# to the roi near the bottom of the image and less to the next roi and so on.
ROIS = [ # [ROI, weight]
        (0, 100, 160, 20, 0.7), # You'll need to tweak the weights for your app
        (0,  50, 160, 20, 0.3), # depending on how your robot is set up.
        (0,   0, 160, 20, 0.1)
       ]

# Compute the weight divisor (we're computing this so you don't have to make weights add to 1).
weight_sum = 0
for r in ROIS: weight_sum += r[4] # r[4] is the roi weight.

# Camera setup...
sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.GRAYSCALE) # use grayscale.
sensor.set_framesize(sensor.QQVGA) # use QQVGA for speed.
sensor.skip_frames(time = 2000) # Let new settings take effect.
sensor.set_auto_gain(False) # must be turned off for color tracking
sensor.set_auto_whitebal(False) # must be turned off for color tracking
clock = time.clock() # Tracks FPS.

while(True):
    clock.tick() # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot() # Take a picture and return the image.

    centroid_sum = 0

    for r in ROIS:
        blobs = img.find_blobs(GRAYSCALE_THRESHOLD, roi=r[0:4], merge=True) # r[0:4] is roi tuple.
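        # The fragment is cut off here. A hedged sketch of how it continues, based on
        # the standard OpenMV line-following example (assumes GRAYSCALE_THRESHOLD,
        # e.g. [(240, 255)] for a white line, was defined in the missing top half):
        if blobs:
            largest_blob = max(blobs, key=lambda b: b.pixels())
            img.draw_rectangle(largest_blob.rect())
            centroid_sum += largest_blob.cx() * r[4]  # weight the x centroid by the roi weight

    center_pos = centroid_sum / weight_sum  # weighted average x position of the line
    print("center_pos: %0.1f, FPS: %0.1f" % (center_pos, clock.fps()))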
Example #8
import pyb, sensor, image, os, time
sensor.reset()
sensor.set_framesize(sensor.QVGA)
if not "test" in os.listdir(): os.mkdir("test")
while(True):
    sensor.set_pixformat(sensor.GRAYSCALE)
    for i in range(2):
        img = sensor.snapshot()
        num = pyb.rng()
        print("Saving %d" % num)
        img.save("test/image-%d" % num)
        #
        img = sensor.snapshot()
        num = pyb.rng()
        print("Saving %d" % num)
        img.save("test/image-%d.bmp" % num)
        #
        img = sensor.snapshot()
        num = pyb.rng()
        print("Saving %d" % num)
        img.save("test/image-%d.pgm" % num)
        #
        img = sensor.snapshot()
        num = pyb.rng()
        print("Saving %d" % num)
        img.save("/test/image-%d" % num)
        #
        img = sensor.snapshot()
        num = pyb.rng()
        print("Saving %d" % num)
        img.save("/test/image-%d.bmp" % num)
Example #9
# Image Statistics Info Example
#
# This script computes the statistics of the image and prints it out.

import sensor, image, time, lcd
import KPU as kpu

# Init LCD
lcd.init(freq=15000000)

sensor.reset()
sensor.set_pixformat(sensor.RGB565)  # GRAYSCALE or RGB565.
sensor.set_framesize(sensor.QVGA)
sensor.skip_frames(time=2000)
#sensor.set_auto_gain(False) # must be turned off for color tracking
#sensor.set_auto_whitebal(False) # must be turned off for color tracking
sensor.run(1)
# Clock
clock = time.clock()

while (True):
    clock.tick()
    img = sensor.snapshot()
    print(img.get_statistics())
    print(clock.fps())
    lcd.display(img)

# You can also pass get_statistics() an "roi=" to get just the statistics of that area.
# get_statistics() allows you to quickly determine the color channel information of
# any area in the image.
Example #10
# Smile detection using Haar Cascade + CNN.
import sensor, time, image, os, nn

sensor.reset()                          # Reset and initialize the sensor.
sensor.set_contrast(2)
sensor.set_pixformat(sensor.RGB565)     # Set pixel format to RGB565
sensor.set_framesize(sensor.QVGA)       # Set frame size to QVGA (320x240)
sensor.skip_frames(time=2000)
sensor.set_auto_gain(False)

# Load smile detection network
net = nn.load('/smile.network')

# Load Face Haar Cascade
face_cascade = image.HaarCascade("frontalface", stages=25)
print(face_cascade)

# FPS clock
clock = time.clock()
while (True):
    clock.tick()

    # Capture snapshot
    img = sensor.snapshot()

    # Find faces.
    objects = img.find_features(face_cascade, threshold=0.75, scale_factor=1.25)

    # Detect smiles
    for r in objects:
        # Resize and center detection area
Example #11
# Untitled - By: Gehaha - Sun Nov 18 2018

import sensor, image, time

green_threshold = (0, 80, -70, -10, -0, 30)

# Set the threshold for green. The six numbers are the min/max of the L, A, and
# B channels (minL, maxL, minA, maxA, minB, maxB); pick LAB values from the three
# histograms on the left side of the IDE. For a grayscale image, only a
# (min, max) pair is needed.
sensor.reset()  # initialize the camera
sensor.set_pixformat(sensor.RGB565)  # RGB565 format
sensor.set_framesize(sensor.QQVGA)  # QQVGA is faster
sensor.skip_frames(time=2000)  # skip frames for 2000 ms so the new settings and auto white balance take effect
sensor.set_auto_whitebal(False)
clock = time.clock()  # track the frame rate

while (True):
    clock.tick()
    img = sensor.snapshot()
    blobs = img.find_blobs([green_threshold])

    if blobs:
        for b in blobs:
            img.draw_rectangle(b[0:4])
            img.draw_cross(b[5], b[6])
    print(clock.fps())
Example #12
import sensor,time,pyb,math
from pyb import Pin, Timer, LED, UART
# threshold for the black dot
threshold = [(11, 25, -41, -19, -9, 41)]
# error data in the x/y plane
err_x = 0
err_y = 0
# data frame to send
uart_buf = bytearray([0x55,0xAA,0x00,0x00,0x00,0x00,0xAA])

# UART 3 configuration
uart = UART(3, 115200)
uart.init(115200, bits=8, parity=None, stop=1)

sensor.reset()
sensor.set_pixformat(sensor.RGB565)  # set the pixel format to RGB565
sensor.set_framesize(sensor.QQVGA)  # set the frame size
sensor.skip_frames(20)  # let the camera settle for a few frames
sensor.set_auto_whitebal(False)  # disable auto white balance
clock = time.clock()  # start the clock

def order(blob):
    return blob.pixels()

"""几种思路方法:
1. 排序再获取两个
2. 获取第一大,再获取第二大
3. 获取矩形,再根据statistics获取颜色范围,三重循环
4. 把两个当作整体获取(ncc,color code)
"""
def findCubeCenter():
    """
    find the cube roi position
    """
    global roi
    sensor.set_auto_whitebal(False)
    sensor.set_contrast(2)

    cnt = 1
    LAB_THRESHOLD = (
        (0, 60, -40, 50, -40, 10),  # blue
        (0, 40, -50, 40, -60, 30),  # yellow orange red white
        (0, 50, -40, 15, -25, 70))
    #(0, 70, -25, 15, -60, 30)) # green

    CENTER_THRESHOLD = roi[2] / 3 / 2
    gain = 0
    while (True):

        if cnt > 12:
            sensor.set_auto_gain(gain)

        img = sensor.snapshot()

        if cnt % 60 == 0:
            cnt = 1
            gain += 10

        if (int(cnt / 24)) % 2 == 1:
            lab_threshold = LAB_THRESHOLD[int(cnt / 12) - 2]
            img = img.binary([lab_threshold])
            img = img.dilate(2)
            img = img.erode(2)

        lcd.display(img)

        center_roi = list(
            map(int, [
                roi[0] + roi[2] / 2 - CENTER_THRESHOLD * 2,
                roi[1] + roi[3] / 2 - CENTER_THRESHOLD * 2,
                CENTER_THRESHOLD * 4, CENTER_THRESHOLD * 4
            ]))
        squares = []
        for r in img.find_rects(roi=center_roi, threshold=500):
            if (isSquare(r)):
                squares.append(r)
                img = img.draw_rectangle(r.rect())
                for p in r.corners():
                    img = img.draw_circle(p[0], p[1], 5, color=(0, 255, 0))
                lcd.display(img)
                #time.sleep_ms(5000)
        if not squares:
            cnt += 1
            print(cnt)
        else:
            roi = findCenter(squares, roi, CENTER_THRESHOLD * math.sqrt(2))
            center_roi = list(
                map(int, [
                    roi[0] + roi[2] / 2 - CENTER_THRESHOLD * 2,
                    roi[1] + roi[3] / 2 - CENTER_THRESHOLD * 2,
                    CENTER_THRESHOLD * 4, CENTER_THRESHOLD * 4
                ]))
            img = img.draw_rectangle(center_roi)
            img = img.draw_rectangle(roi)

            lcd.display(img)

            sensor.reset()
            sensor.set_pixformat(sensor.RGB565)
            sensor.set_framesize(sensor.QQVGA)

            sensor.set_auto_whitebal(False)
            sensor.skip_frames(time=60)
            gain = sensor.get_gain_db()
            sensor.set_auto_gain(0, gain)
            sensor.skip_frames(time=60)
            sensor.set_auto_exposure(0, 80000)

            sensor.skip_frames(time=60)
            return 1
# will then be averaged with different weights where the most weight is assigned
# to the roi near the bottom of the image and less to the next roi and so on.
ROIS = [  # [ROI, weight]
    (0, 100, 160, 20, 0.1),  # You'll need to tweak the weights for your app
    (0, 50, 160, 20, 0.3),   # depending on how your robot is set up.
    (0, 0, 160, 20, 0.7)
]

# Compute the weight divisor (we're computing this so you don't have to make weights add to 1).
weight_sum = 0
for r in ROIS:
    weight_sum += r[4]  # r[4] is the roi weight.

# Camera setup...
sensor.reset()  # Initialize the camera sensor.
sensor.set_pixformat(sensor.GRAYSCALE)  # use grayscale.
sensor.set_framesize(sensor.QQVGA)  # use QQVGA for speed.
sensor.skip_frames(30)  # Let new settings take effect.
sensor.set_auto_gain(False)  # must be turned off for color tracking
sensor.set_auto_whitebal(False)  # must be turned off for color tracking
clock = time.clock()  # Tracks FPS.

while (True):
    clock.tick()  # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot()  # Take a picture and return the image.

    centroid_sum = 0
    for r in ROIS:
        blobs = img.find_blobs(GRAYSCALE_THRESHOLD, roi=r[0:4],
                               merge=True)  # r[0:4] is roi tuple.
        if blobs:
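            # A hedged completion mirroring the sketch given after the earlier copy
            # of this fragment (not the author's code):
            largest_blob = max(blobs, key=lambda b: b.pixels())
            img.draw_rectangle(largest_blob.rect())
            centroid_sum += largest_blob.cx() * r[4]  # weight the x centroid

    center_pos = centroid_sum / weight_sum  # weighted average x position
    print("center_pos: %0.1f" % center_pos)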
# Optical Flow Example
#
# Your OpenMV Cam can use optical flow to determine the displacement between
# two images. This allows your OpenMV Cam to track movement like how your laser
# mouse tracks movement. By taking the difference between successive images
# you can determine instantaneous displacement with your OpenMV Cam too!

import sensor, image, time

sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
sensor.set_framesize(sensor.B64x32) # or B40x30 or B64x64
clock = time.clock() # Tracks FPS.

# NOTE: The find_displacement function works by taking the 2D FFTs of the old
# and new images and compares them using phase correlation. Your OpenMV Cam
# only has enough memory to work on two 64x64 FFTs (or 128x32, 32x128, or etc).
old = sensor.snapshot()

while(True):
    clock.tick() # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot() # Take a picture and return the image.

    [delta_x, delta_y, response] = old.find_displacement(img)

    old = img.copy()

    print("%0.1f X\t%0.1f Y\t%0.2f QoR\t%0.2f FPS" % \
        (delta_x, delta_y, response, clock.fps()))
#
# Note: You will need an SD card to run this example.
#
# This example demonstrates using frame differencing with your OpenMV Cam. This
# example is advanced because it performs a background update to deal with the
# background image changing over time.

import sensor, image, pyb, os, time

TRIGGER_THRESHOLD = 5

BG_UPDATE_FRAMES = 50 # How many frames before blending.
BG_UPDATE_BLEND = 128 # How much to blend by... ([0-256]==[0.0-1.0]).

sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
sensor.skip_frames(time = 2000) # Let new settings take effect.
sensor.set_auto_whitebal(False) # Turn off white balance.
clock = time.clock() # Tracks FPS.

if not "temp" in os.listdir(): os.mkdir("temp") # Make a temp directory

print("About to save background image...")
sensor.skip_frames(time = 2000) # Give the user time to get ready.
sensor.snapshot().save("temp/bg.bmp")
print("Saved background image - Now frame differencing!")

triggered = False

frame_count = 0
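
# The original loop is cut off here. A hedged sketch of the background update that
# BG_UPDATE_FRAMES and BG_UPDATE_BLEND describe, based on the standard OpenMV
# advanced frame-differencing example (not verbatim from this script):
while(True):
    clock.tick()
    img = sensor.snapshot()

    frame_count += 1
    if frame_count > BG_UPDATE_FRAMES:
        frame_count = 0
        # blend() computes ((NEW*alpha)+(OLD*(256-alpha)))/256; using
        # 256-BG_UPDATE_BLEND folds the old background into the new frame
        # before saving it back as the updated background.
        img.blend("temp/bg.bmp", alpha=(256 - BG_UPDATE_BLEND))
        img.save("temp/bg.bmp")

    img.difference("temp/bg.bmp")  # abs(NEW - BACKGROUND)
    hist = img.get_histogram()
    # Compare the non-outlier max (99th percentile) against a non-max value (90th);
    # the gap grows as more pixels differ from the background.
    diff = hist.get_percentile(0.99).l_value() - hist.get_percentile(0.90).l_value()
    triggered = diff > TRIGGER_THRESHOLD
    print("Triggered: %s, FPS: %0.1f" % (triggered, clock.fps()))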
Example #17
import sensor, image, time, lcd
from fpioa_manager import fm
from board import board_info
from Maix import GPIO

num = 0
switch_status = 0
fm.register(board_info.BOOT_KEY, fm.fpioa.GPIO1, force=True)
fm.register(board_info.ENTER, fm.fpioa.GPIOHS10, force=True)
key_shot = GPIO(GPIO.GPIOHS10, GPIO.IN)
repl_unlock = GPIO(GPIO.GPIO1, GPIO.IN)
lcd.init(freq=15000000)
sensor.reset()

sensor.set_pixformat(sensor.YUV422)
sensor.set_framesize(sensor.QVGA)
sensor.skip_frames(time=2000)
clock = time.clock()

while (repl_unlock.value() != 0):
    clock.tick()
    img = sensor.snapshot()
    if key_shot.value() == 0:
        path = "/sd/kamera-" + str(num) + ".jpg"
        lcd.draw_string(80, 40, "Saved :)", lcd.RED, lcd.WHITE)
        time.sleep(1)
        img.save(path)
        num += 1
    else:
        lcd.display(img)
Example #18
# Frame Differencing

import sensor, image, pyb, time

BLUE_LED_PIN = 3  # operation indicator

sensor.reset()  # Initialize the camera sensor.
sensor.set_pixformat(sensor.GRAYSCALE)  # grayscale image
sensor.set_framesize(sensor.B64X64)  # 64x64 resolution
sensor.skip_frames(time=2000)  # Let new settings take effect.
sensor.set_auto_whitebal(False)  # Turn off white balance.
clock = time.clock()  # Tracks FPS.

# Take RAM from the main frame buffer to allocate two extra frame buffers.
buffer1 = sensor.alloc_extra_fb(sensor.width(), sensor.height(),
                                sensor.GRAYSCALE)
buffer2 = sensor.alloc_extra_fb(sensor.width(), sensor.height(),
                                sensor.GRAYSCALE)

sensor.skip_frames(time=500)  # Give the user time to get ready.
buffer1.replace(sensor.snapshot())  # Capture the first frame.

oddframe = True  # Tracks if the frame number is odd or not.

pyb.LED(BLUE_LED_PIN).on()  # indicator on

for i in range(3000):
    clock.tick()  # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot()  # Take a picture and return the image.
    if (oddframe):
        oddframe = False
'''
Experiment: draw various shapes and write characters
Version: v1.0
Date: 2019.12
Author: 01Studio
'''

import sensor, image, time, lcd

lcd.init(freq=15000000)
sensor.reset()  # reset the camera
#sensor.set_vflip(1)                 # flip the image for rear-facing mounting (what you see is what you get)

sensor.set_pixformat(sensor.RGB565)  # set the pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.QVGA)  # set the frame size to QVGA (320x240)
sensor.skip_frames(time=2000)  # wait for the settings to take effect
clock = time.clock()  # create a clock object to track FPS

while (True):
    clock.tick()
    img = sensor.snapshot()

    # Draw a line segment from (x0, y0) to (x1, y1): red, thickness 2.
    img.draw_line(20, 20, 100, 20, color=(255, 0, 0), thickness=2)

    # Draw a rectangle: green, unfilled.
    img.draw_rectangle(150,
                       20,
                       100,
                       30,
                       color=(0, 255, 0),
# can then classify as a marker.

import sensor, image, time

# For color tracking to work really well you should ideally be in a very, very,
# very, controlled environment where the lighting is constant. Additionally, if
# you want to track more than 2 colors you need to set the boundaries for them
# very narrowly. If you try to track... generally red, green, and blue then
# you will end up just tracking everything which you don't want.
red_threshold   = (  40,   60,   60,   90,   50,   70)
blue_threshold  = (   0,   20,  -10,   30,  -60,   10)
# You may need to tweak the above settings for tracking red and blue things...
# Select an area in the Framebuffer to copy the color settings.

sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565) # use RGB565.
sensor.set_framesize(sensor.QQVGA) # use QQVGA for speed.
sensor.skip_frames(10) # Let new settings take affect.
sensor.set_whitebal(False) # turn this off.
clock = time.clock() # Tracks FPS.

while(True):
    clock.tick() # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot() # Take a picture and return the image.

    blobs = img.find_blobs([red_threshold, blue_threshold])
    merged_blobs = img.find_markers(blobs)
    if merged_blobs:
        for b in merged_blobs:
            # Draw a rect around the blob.
            img.draw_rectangle(b[0:4]) # rect
Example #21
import sensor

sensor.reset()  # initialize the sensor first (this truncated snippet skips it)
sensor.set_contrast(1)
sensor.set_gainceiling(8)

sensor.set_framesize(sensor.QVGA)
sensor.set_pixformat(sensor.JPEG)
sensor.set_quality(98)

with open("/test.jpeg", "w") as f:
    f.write(sensor.snapshot())
# Basic Frame Differencing Example
#
# Note: You will need an SD card to run this example.
#
# This example demonstrates using frame differencing with your OpenMV Cam. It's
# called basic frame differencing because there's no background image update.
# So, as time passes the background image may change resulting in issues.

import sensor, image, pyb, os, time

sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
sensor.skip_frames(time = 2000) # Let new settings take effect.
sensor.set_auto_whitebal(False) # Turn off white balance.
clock = time.clock() # Tracks FPS.

if not "temp" in os.listdir(): os.mkdir("temp") # Make a temp directory

print("About to save background image...")
sensor.skip_frames(time = 2000) # Give the user time to get ready.
sensor.snapshot().save("temp/bg.bmp")
print("Saved background image - Now frame differencing!")

while(True):
    clock.tick() # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot() # Take a picture and return the image.

    # Replace the image with the "abs(NEW-OLD)" frame difference.
    img.difference("temp/bg.bmp")
Example #23
import sensor, image, time

def draw_lines(x, y):
    centerX = 80
    centerY = 60
    img.draw_line(x, y - 20, x, y + 20, color = (255, 0, 0), thickness = 5)
    img.draw_line(x - 20, y, x + 20, y, color = (255, 0, 0), thickness = 5)

    img.draw_line(centerX, centerY - 20, centerX, centerY + 20, color = (255, 0, 0), thickness = 5)
    img.draw_line(centerX - 20, centerY, centerX + 20, centerY, color = (255, 0, 0), thickness = 5)

sensor.reset()
sensor.set_pixformat(sensor.RGB565) # grayscale is faster (160x120 max on OpenMV-M7)
#GRAYSCALE, RGB565,BAYER
sensor.set_framesize(sensor.QQVGA)
sensor.skip_frames(time = 2000)
RANGES = [(9, 100, -128, -12, -47, 40)]
RANGES2 = [ (0,100,-100,100,-100,100)]

clock = time.clock()
sensor.set_auto_whitebal(False)
sensor.set_auto_gain(False)
sensor.set_auto_exposure(False, exposure_us=100) # make smaller to go faster
while(True):
    clock.tick()
    img = sensor.snapshot()
    target_found = False
    x = -1
    y = -1
    width = -1
    for b in img.find_blobs( RANGES ):
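        # The loop is cut off here. A hedged completion (not the author's code):
        img.draw_rectangle(b.rect())         # mark each blob in the first range
        x, y, width = b.cx(), b.cy(), b.w()  # remember the last blob seen
        target_found = True
    if target_found:
        draw_lines(x, y)  # crosshairs on the target and on the screen center
    print(clock.fps())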
# IR Beacon Grayscale Tracking Example
#
# This example shows off IR beacon Grayscale tracking using the OpenMV Cam.

import sensor, image, time

thresholds = (255, 255) # thresholds for bright white light from IR.

sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.VGA)
sensor.set_windowing((240, 240)) # 240x240 center pixels of VGA
sensor.skip_frames(time = 2000)
sensor.set_auto_gain(False) # must be turned off for color tracking
sensor.set_auto_whitebal(False) # must be turned off for color tracking
clock = time.clock()

# Only blobs with more pixels than "pixels_threshold" and more area than "area_threshold" are
# returned by "find_blobs" below. Change "pixels_threshold" and "area_threshold" if you change the
# camera resolution. "merge=True" merges all overlapping blobs in the image.

while(True):
    clock.tick()
    img = sensor.snapshot()
    for blob in img.find_blobs([thresholds], pixels_threshold=200, area_threshold=200, merge=True):
        ratio = blob.w() / blob.h()
        if (ratio >= 0.5) and (ratio <= 1.5): # filter out non-squarish blobs
            img.draw_rectangle(blob.rect())
            img.draw_cross(blob.cx(), blob.cy())
    print(clock.fps())
Example #25
def face_recog(calc_time):
    pin = pyb.millis()
    print(pin)
    cc = 0
    #pyb.elapsed_millis(start)
    while pyb.elapsed_millis(pin) < calc_time:
        print("top of face recog function")
        #snapshot on face detection
        RED_LED_PIN = 1
        BLUE_LED_PIN = 3
        sensor.reset() # Initialize the camera sensor.
        sensor.set_pixformat(sensor.GRAYSCALE)
        sensor.set_framesize(sensor.HQVGA) # or sensor.QQVGA (or others)
        sensor.skip_frames(time = 2000) # Let new settings take affect.
        face_cascade = image.HaarCascade("frontalface", stages=25)
        uos.chdir("/")
        pyb.LED(RED_LED_PIN).on()
        print("About to start detecting faces...")
        sensor.skip_frames(time = 2000) # Give the user time to get ready.
        pyb.LED(RED_LED_PIN).off()
        print("Now detecting faces!")
        pyb.LED(BLUE_LED_PIN).on()
        diff = 10 # We'll say we detected a face after 10 frames.
        while(diff):
            img = sensor.snapshot()
            faces = img.find_features(face_cascade, threshold=0.5, scale_factor=1.5)
            if faces:
                diff -= 1
                for r in faces:
                    img.draw_rectangle(r)
        pyb.LED(BLUE_LED_PIN).off()
        print("Face detected! Saving image...")
        pic_name = "snapshot-person.pgm"
        sensor.snapshot().save(pic_name) # Save Pic. to root of SD card -- uos.chdir("/")
        pyb.delay(100)
        snap_img = image.Image(pic_name).mask_ellipse()
        d0 = snap_img.find_lbp((0, 0, snap_img.width(), snap_img.height()))
        # face recognition
        pyb.LED(2).on()
        name_lbp_list = []
        uos.chdir("/Faces") # change directory to where all the webex photos from tcp are stored
        for filename in uos.listdir("/Faces"):
            if filename.endswith(".pgm") :
                try:
                    img = None
                    img = image.Image(filename).mask_ellipse()
                    d1 = img.find_lbp((0, 0, img.width(), img.height()))
                    dist = image.match_descriptor(d0, d1,50)
                    word = filename
                    #print(filename)
                    und_loc = word.index('_')
                    word = word[0:(und_loc)]
                    name_lbp_list.append(word)
                    name_lbp_list.append(dist)
                    continue
                except Exception as e:
                    print(e)
                    print("error reading file")
            else:
                print("ERROR")
        print(name_lbp_list)
        #print(len(name_lbp_list))
        end = 0
        name_avg = []
        i = 0
        start = 0
        while i < len(name_lbp_list):
            if ( (i+2) < len(name_lbp_list)) and (name_lbp_list[i] != name_lbp_list[i+2] ) :
                end = i+2
                #print(start)
                #print(end)
                face = []
                face = name_lbp_list[start:end]
                print(face)
                j = 1
                sum_lbp = 0
                while j < len(face):
                    sum_lbp += face[j]
                    j += 2
                name_avg.append(face[0])
                name_avg.append(sum_lbp/(len(face)/2))
                start = i+2
            i += 2
        face = []
        face = name_lbp_list[(end):(len(name_lbp_list))]
        print(face)
        j = 1
        sum_lbp = 0
        while j < len(face):
            sum_lbp += face[j]
            j += 2
        name_avg.append(face[0])
        name_avg.append(sum_lbp/(len(face)/2))
        print(name_avg)
        lbps = []
        k = 1
        while k < len(name_avg):
            lbps.append(name_avg[k])
            k +=2
        print(lbps)
        #print(len(lbps))
        min_lbp = min(lbps)
        print(min_lbp)
        ind = lbps.index(min(lbps))
        #print(ind)
        ind += 1
        found_person = name_avg[2*ind - 2]
        id_name = "The person you are looking at is: " + found_person
        print(id_name)
        #delete snapshot of person
        uos.remove("/snapshot-person.pgm")
        pyb.LED(2).off()
        cc += 1
        print(cc)
Example #26
# Dataset Capture Script - By: yorbengoorzottegem9620 - Fri Dec 4 2020

# Use this script to control how your OpenMV Cam captures images for your dataset.
# You should apply the same image pre-processing steps you expect to run on images
# that you will feed to your model during run-time.

import sensor, image, time

sensor.reset()
sensor.set_pixformat(sensor.RGB565)  # Modify as you like.
sensor.set_framesize(sensor.QVGA)  # Modify as you like.
sensor.skip_frames(time=2000)

clock = time.clock()

while (True):
    clock.tick()
    img = sensor.snapshot()
    # Apply lens correction if you need it.
    # img.lens_corr()
    # Apply rotation correction if you need it.
    # img.rotation_corr()
    # Apply other filters...
    # E.g. mean/median/mode/midpoint/etc.
    print(clock.fps())
Example #27
import sensor, image, pyb, os, time

TRIGGER_THRESHOLD = 5

width_frame = 320  #max for QVGA 320
height_frame = 240  #max for QVGA 240

sensor.reset()  # Initialize the camera sensor.
sensor.set_pixformat(sensor.GRAYSCALE)  # or sensor.GRAYSCALE RGB565
sensor.set_framesize(sensor.QVGA)  # or sensor.QQVGA (or others)
sensor.set_windowing(
    (width_frame,
     height_frame))  # window the center width_frame x height_frame pixels

sensor.skip_frames(time=200)  # Let new settings take effect.
sensor.set_auto_whitebal(False)  # Turn off white balance.
sensor.set_auto_gain(False)

clock = time.clock()  # Tracks FPS.

#extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.GRAYSCALE)
extra_fb = sensor.alloc_extra_fb(width_frame, height_frame, sensor.GRAYSCALE)

print("About to save background image...")
sensor.skip_frames(time=200)  # Give the user time to get ready.
extra_fb.replace(sensor.snapshot())
print("Saved background image - Now frame differencing!")

while (True):
    clock.tick()  # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot()  # Take a picture and return the image.
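
    # The loop is cut off here. A hedged completion based on the in-memory
    # frame-differencing examples (not verbatim from this script):
    img.difference(extra_fb)  # abs(NEW - BACKGROUND)
    hist = img.get_histogram()
    # Compare the non-outlier max (99th percentile) against a non-max value (90th);
    # the gap grows as more pixels differ from the saved background.
    diff = hist.get_percentile(0.99).l_value() - hist.get_percentile(0.90).l_value()
    triggered = diff > TRIGGER_THRESHOLD
    print("Triggered: %s, FPS: %0.1f" % (triggered, clock.fps()))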
# Grayscale Filter Example
#
# The sensor module can perform some basic image processing while it is reading
# the image in. This example shows off how to apply grayscale thresholds.
#
# WARNING - THIS FEATURE NEEDS TO BE RE-WORKED. THE API MAY CHANGE IN THE
# FUTURE! Please use the binary function for image segmentation if possible.

import sensor, image, time

sensor.reset()  # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565)  # or sensor.GRAYSCALE
sensor.set_framesize(sensor.QVGA)  # or sensor.QQVGA (or others)
sensor.skip_frames(10)  # Let new settings take effect.
clock = time.clock()  # Tracks FPS.

# Segment the image by following thresholds. This segmentation is done while
# the image is being read in so it does not cost any additional time...
sensor.set_image_filter(sensor.FILTER_BW, lower=128, upper=255)

while (True):
    clock.tick()  # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot()  # Take a picture and return the image.
    print(clock.fps())  # Note: Your OpenMV Cam runs about half as fast while
    # connected to your computer. The FPS should increase once disconnected.
Example #29
# If the backlight cannot provide sufficient brightness, increase user_exposure_time.
# The tested system is lit by 4 x 0.5 W LEDs placed on the diffuser corners;
# any other type of backlight illumination should be sufficient (xenon lamp, continuous backlight, etc.)
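
# The top of this script is cut off; minimal assumed setup so the fragment below
# runs (the placeholder value is illustrative, not from the original):
import sensor
from pyb import LED
user_exposure_time = 5000  # hypothetical exposure in microseconds; tune for your backlight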

#######  Please don't change anything past this line
last_proximity_state = 1  # remember the last proximity-sensor state, set to HIGH / 1 (reverse logic)

# Use the built-in LED, useful for debugging
# note that an additional light source reflected from a polished bottle surface can confuse the camera
red_led = LED(
    1
)  # use only for debugging / camera status, see previous line / not really used in code

sensor.reset()  # Initialize the camera sensor
sensor.set_pixformat(
    sensor.GRAYSCALE
)  # set camera format to grayscale (color not important in this example)
sensor.set_framesize(sensor.QVGA)  # set camera resolution to QVGA 320 x 240
sensor.set_auto_exposure(
    False, exposure_us=user_exposure_time
)  # set exposure time, user changable (user_exposure_time variable)
sensor.set_auto_gain(
    False, gain_db=10
)  #set camera gain, keep it as this value is optimised for given light source and exposure time

# These settings are set manually to prevent the camera from changing them during use (no automatic settings in machine vision!)
sensor.set_brightness(2)  #set camera brightness
sensor.set_contrast(3)  #set camera contrast
sensor.set_saturation(0)  #set camera saturation

###################################################
Example #30
def jpeg_image_snapshot(data):
    pixformat, framesize = bytes(data).decode().split(",")
    sensor.set_pixformat(eval(pixformat))
    sensor.set_framesize(eval(framesize))
    img = sensor.snapshot().compress(quality=90)
    return struct.pack("<I", img.size())
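
# A hedged usage sketch (assumed, following the OpenMV rpc image-transfer example):
# the callback above runs on the camera side; a controller script calls it by name
# over the link to learn the compressed image size before reading the data out.
import rpc, sensor, struct

sensor.reset()
interface = rpc.rpc_usb_vcp_slave()
interface.register_callback(jpeg_image_snapshot)
interface.loop()  # serve remote calls forever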
Example #31
# Specify communication method: "print" "usb" "can"
COMMS_METHOD = "print"
TARGET_WIDTH = 39.25
TARGET_HEIGHT = 17.00
HISTORY_LENGTH = 10

# make USB_VCP object
usb = USB_VCP()
led = pyb.LED(3)

SCREEN_CENTERP = 160  # screen center (pixels) horizontal
VERTICAL_CENTERP = 120  # screen center (pixels) vertical

sensor.reset()  # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565)  # or sensor.GRAYSCALE
sensor.set_framesize(sensor.QVGA)  # or sensor.QQVGA (or others)
sensor.skip_frames(time=2000)  # Let new settings take effect.
clock = time.clock()

# derive a fixed manual exposure from the camera's measured auto exposure
KMAN = 0.065  # constant for exposure setting
autoExposureSum = 0
readExposureNum = 10
for i in range(readExposureNum):
    autoExposureSum += sensor.get_exposure_us()

autoExposure = autoExposureSum / readExposureNum
manualExposure = int(autoExposure *
                     KMAN)  # scale factor for decreasing autoExposure
sensor.set_auto_exposure(False, manualExposure)  # autoset exposures
# Image Histogram Info Example
#
# This script computes the histogram of the image and prints it out.

import sensor, image, time

sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE) # or RGB565.
sensor.set_framesize(sensor.QVGA)
sensor.skip_frames(time = 2000)
sensor.set_auto_gain(False) # must be turned off for color tracking
sensor.set_auto_whitebal(False) # must be turned off for color tracking
clock = time.clock()

while(True):
    clock.tick()
    img = sensor.snapshot()
    # Gets the grayscale histogram for the image into 8 bins.
    # Bins defaults to 256 and may be between 2 and 256.
    print(img.get_histogram(bins=8))
    print(clock.fps())

# You can also pass get_histogram() an "roi=" to get just the histogram of that area.
# get_histogram() allows you to quickly determine the color channel information of
# any area in the image.
enable_lens_corr = True # turn on for straighter lines...

import sensor, image, time
#import maths library
from math import sqrt
import pyb

sensor.reset()
sensor.set_pixformat(sensor.RGB565) # grayscale is faster
sensor.set_framesize(sensor.QQVGA)
sensor.skip_frames(time = 2000)
clock = time.clock()

min_degree = 0
max_degree = 179

#Coordinates of detected lines
start_x,start_y = 0,0
end_x,end_y = 0,0
mid_x,mid_y = 0,0
line_tuple = None

#Reference Coordinates
reference_x = 60
reference_y = 80

#Distance for lane control
lane_control_distance = 0

#LED Definition
red_led = pyb.LED(1)
# Find Circles Example
#
# This example shows off how to find circles in the image using the Hough
# Transform. https://en.wikipedia.org/wiki/Circle_Hough_Transform
#
# Note that the find_circles() method will only find circles which are completely
# inside of the image. Circles which go outside of the image/roi are ignored...

import sensor, image, time

sensor.reset()
sensor.set_pixformat(sensor.RGB565) # grayscale is faster
sensor.set_framesize(sensor.QQVGA)
sensor.skip_frames(time = 2000)
clock = time.clock()

while(True):
    clock.tick()
    img = sensor.snapshot().lens_corr(1.8)

    # Circle objects have four values: x, y, r (radius), and magnitude. The
    # magnitude is the strength of the detection of the circle. Higher is
    # better...

    # `threshold` controls how many circles are found. Increase its value
    # to decrease the number of circles detected...

    # `x_margin`, `y_margin`, and `r_margin` control the merging of similar
    # circles in the x, y, and r (radius) directions.

    for c in img.find_circles(threshold = 2000, x_margin = 10, y_margin = 10, r_margin = 10):
Example #35
# TensorFlow Lite Person Detection Example
#
# Google's Person Detection Model detects if a person is in view.
#
# In this example we slide the detector window over the image and get a list
# of activations. Note that using a CNN with a sliding window is extremely compute
# expensive, so for an exhaustive search do not expect the CNN to be real-time.

import sensor, image, time, os, tf

sensor.reset()  # Reset and initialize the sensor.
sensor.set_pixformat(
    sensor.GRAYSCALE)  # Set pixel format to GRAYSCALE (or RGB565)
sensor.set_framesize(sensor.QVGA)  # Set frame size to QVGA (320x240)
sensor.set_windowing((240, 240))  # Set 240x240 window.
sensor.skip_frames(time=2000)  # Let the camera adjust.

# Load the built-in person detection network (the network is in your OpenMV Cam's firmware).
labels, net = tf.load_builtin_model('person_detection')

clock = time.clock()
while (True):
    clock.tick()

    img = sensor.snapshot()

    # net.classify() will run the network on an roi in the image (or on the whole image if the roi is not
    # specified). A classification score output vector will be generated for each location. At each scale the
    # detection window is moved around in the ROI using x_overlap (0-1) and y_overlap (0-1) as a guide.
    # If you set the overlap to 0.5 then each detection window will overlap the previous one by 50%. Note
    # the computational work load goes WAY up the more overlap. Finally, for multi-scale matching after
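    # (The comment above is cut off.) A hedged sketch of the classify call it
    # describes, following the official TensorFlow person-detection example:
    for obj in net.classify(img, min_scale=1.0, scale_mul=0.5, x_overlap=0.0, y_overlap=0.0):
        print("**********\nDetections at [x=%d,y=%d,w=%d,h=%d]" % obj.rect())
        img.draw_rectangle(obj.rect())
        # Pair each label with its confidence score.
        for label, confidence in zip(labels, obj.output()):
            print("%s = %f" % (label, confidence))
    print(clock.fps(), "fps")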
Example #36
# High FPS Example
#
# This example shows off how to make the frame rate of the global shutter camera extremely
# high. To do so you need to set the resolution to a low value such that pixel binning is
# activated on the camera and then reduce the maximum exposure time.
#
# When the resolution is 320x240 or less the camera reads out pixels 2x faster. When the
# resolution is 160x120 or less the camera reads out pixels 4x faster. This happens due
# to pixel binning which is automatically activated for you to increase the readout speed.
#
# While the readout speed may increase, the camera must still expose the image for the requested
# time, so you will not get the maximum readout speed unless you reduce the exposure time too.
# This results in a dark image however so YOU NEED A LOT of lighting for high FPS.

import sensor, image, time

sensor.reset()                      # Reset and initialize the sensor.
sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE
sensor.set_framesize(sensor.QQVGA)  # Set frame size to QQVGA (160x120) - make smaller to go faster
sensor.skip_frames(time = 2000)     # Wait for settings take effect.
clock = time.clock()                # Create a clock object to track the FPS.

sensor.set_auto_exposure(True, exposure_us=5000) # make smaller to go faster

while(True):
    clock.tick()                    # Update the FPS clock.
    img = sensor.snapshot()         # Take a picture and return the image.
    print(clock.fps())              # Note: OpenMV Cam runs about half as fast when connected
                                    # to the IDE. The FPS should increase once disconnected.
Example #37
def cal():
    flag=0
    zfx=0
    yx=0
    sjx=0
    r=[0,0,0,0]
    key = 0
    G=0
    while(True):
        key=uart.readchar()
        if key==1:
            break
        sum_zfx=0
        sum_yx=0
        sum_sjx=0
        dis=0
        clock.tick()
        img = sensor.snapshot(1.8)
        #img1 = img.binary(blue)

        for x in templates :
            img = sensor.snapshot(1.8)
            img = img.to_grayscale()
            flag = 0
            for t in x:
                clock.tick()
                img = sensor.snapshot(1.8)
                img = img.to_grayscale()

                template = image.Image(t)
                #ball = image.Image(t)
                if x == zfx_tempaltes:
                    r = img.find_template(template, 0.80, step=4, search=SEARCH_EX) #, roi=(10, 0, 60, 60))
                    if r:
                        print(t)
                        zfx = r
                        sum_zfx=sum_zfx+1
                elif x == yx_tempaltes:
                    for c in img.find_circles(threshold = 3500, x_margin = 10, y_margin = 10, r_margin = 10,r_min = 2, r_max = 100, r_step = 2):
                        img.draw_circle(c.x(), c.y(), c.r(), color = (255, 0, 0))
                        if c.r()>1:
                            x=c.x()-c.r()
                            y=c.y()-c.r()
                            w=c.r()*2
                            h=c.r()*2
                            r=[x,y,w,h]
                            yx = r
                            sum_yx=20
                elif x == sjx_tempaltes:
                    r = img.find_template(template, 0.80, step=4, search=SEARCH_EX) #, roi=(10, 0, 60, 60))
                    if r:
                        print(t)
                        sjx = r
                        sum_sjx=sum_sjx+1
        if (sum_zfx>sum_yx and sum_zfx>sum_sjx) :
            r=zfx
            t=8#"zfx"
        elif (sum_yx>sum_zfx and sum_yx>sum_sjx) :
            r=yx
            t=9#"yx"
        else:
            r=sjx
            t=10#"sjx"
        if (sum_zfx!=0 or sum_yx!=0 or sum_sjx!=0):

            #change[0]=r[0]+0
            #change[1]=r[1]+0
            #change[2]=r[2]-0
            #change[3]=r[3]-0
            sum_red=0
            sum_green=0
            sum_blue=0
            x=r[0]
            y=r[1]
            w=r[2]
            h=r[3]
            center_x=r[0]+int(r[2]/2)
            center_y=r[1]+int(r[3]/2)
            sensor.reset()
            sensor.set_pixformat(sensor.RGB565)
            sensor.set_framesize(sensor.QQVGA)
            sensor.skip_frames(time = 300)
            sensor.set_auto_gain(False) # must be turned off for color tracking
            sensor.set_auto_whitebal(False) # must be turned off for color tracking
            sensor.set_vflip(False)
            sensor.set_hmirror(False)
            img = sensor.snapshot(1.8)
            #r=list(r)

            i=3
            while(i>0):
                blobs = img.find_blobs(blue,roi=r,pixel_threshold=200,area_threshold=200)
                if blobs:

                    max_blob = find_max(blobs)
                    img.draw_rectangle(r) # rect
                    #img.draw_cross(center_x, center_y) # cx, cy
                    img.draw_cross(max_blob.cx(), max_blob.cy())
                    #img.draw_line(x+int(w/2),y,x,y+h)
                    #img.draw_line(x,y+h,x+w,y+h)
                    #img.draw_line(x+w,y+h,x+int(w/2),y)#三角形

                    img.draw_circle(x+int(w/2),y+int(h/2),int(w/2))
                    sum_blue=sum_blue+1

                blobs = img.find_blobs(red,roi=r,pixel_threshold=200,area_threshold=200)
                if blobs:

                    max_blob = find_max(blobs)
                    img.draw_rectangle(r) # rect
                    img.draw_cross(center_x, center_y) # cx, cy
                    img.draw_circle(x+int(w/2),y+int(h/2),int(h/2))
                    sum_red=sum_red+1



                blobs = img.find_blobs(green,roi=r,pixel_threshold=200,area_threshold=200)
                if blobs:

                    max_blob = find_max(blobs)
                    img.draw_rectangle(r) # rect
                    img.draw_cross(center_x, center_y) # cx, cy
                    sum_green=sum_green+1
                i=i-1

            if (sum_red>sum_green and sum_red>sum_blue) :
                flag=5#"red"
            elif (sum_green>sum_red and sum_green>sum_blue) :
                flag=6#"green"
            elif (sum_blue>sum_red and sum_blue>sum_green):
                flag=7#"blue"
            else :
                flag = 0

        if (r == 0 or flag == 0):
            print("not found")
        else:
            Lm = int(r[2]/2)
            K = 25
            G=1
            length = K/Lm
            #edge =
            print("length:",length)
            print("color:",flag,"object:",t,"range:",r,"red:",sum_red,
                    "green:",sum_green,"blue:",sum_blue,"zfx_model:",sum_zfx,"yx_model:",
                    sum_yx,"sjx_model:",sum_sjx)
            uart.writechar(0x55)
            uart.writechar(0x53)
            uart.writechar(flag)
            uart.writechar(t)
            uart.writechar(Lm)
            uart.writechar(K)
            uart.writechar(G)
            uart.writechar(1)
            G=0
            break
Example #38
rst.high()
time.sleep(100)

write_command(0x11) # Sleep Exit
time.sleep(120)

# Memory Data Access Control
write_command(0x36, 0xC0)

# Interface Pixel Format
write_command(0x3A, 0x05)

# Display On
write_command(0x29)

sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565) # must be this
sensor.set_framesize(sensor.QQVGA2) # must be this
sensor.skip_frames(time = 2000) # Let new settings take effect.
clock = time.clock() # Tracks FPS.

while(True):
    clock.tick() # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot() # Take a picture and return the image.

    write_command(0x2C) # Write image command...
    write_image(img)

    print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
    # connected to your computer. The FPS should increase once disconnected.
Example #39
# Mean Filter Example
#
# This example shows off mean filtering. Mean filtering is your standard average
# filter in an NxN neighborhood. Mean filtering removes noise in the image by
# blurring everything. But, it's the fastest kernel filter operation.

import sensor, image, time

sensor.reset()  # Initialize the camera sensor.
sensor.set_pixformat(sensor.GRAYSCALE)  # or sensor.RGB565
sensor.set_framesize(sensor.QQVGA)  # or sensor.QVGA (or others)
sensor.skip_frames(time=2000)  # Let new settings take effect.
clock = time.clock()  # Tracks FPS.

while (True):
    clock.tick()  # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot()  # Take a picture and return the image.

    # The only argument is the kernel size. N corresponds to a ((N*2)+1)^2
    # kernel size. E.g. 1 == 3x3 kernel, 2 == 5x5 kernel, etc. Note: You
    # shouldn't ever need to use a value bigger than 2.
    img.mean(1)

    print(clock.fps())  # Note: Your OpenMV Cam runs about half as fast while
    # connected to your computer. The FPS should increase once disconnected.
Example #40
# Mjpeg recording example:
#
# You can use your OpenMV Cam to record mjpeg files. You can either feed the
# recorder object JPEG frames or RGB565/Grayscale frames. Once you've finished
# recording an Mjpeg file you can use VLC to play it. If you are on Ubuntu then
# the built-in video player will work too.

import sensor, image, time, mjpeg

sensor.reset()
sensor.set_framesize(sensor.QVGA)
sensor.set_pixformat(sensor.RGB565) # you can also use grayscale

# Warm up the cam
for i in range(10):
    sensor.snapshot()

# FPS clock
clock = time.clock()
mjpeg = mjpeg.Mjpeg("/test.mjpeg") # video setup to use current resolution

for i in range(100):
    clock.tick()
    img = sensor.snapshot()
    mjpeg.add_frame(img)
    # Print FPS.
    # Note: Actual FPS is higher, the IDE slows down streaming.
    print(clock.fps())

mjpeg.close(clock.fps())
print("done")
Example #41
import sensor, image, time, random
import car, hand
import math
from pid import PID

sensor.reset()  # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565)  # use RGB565.
sensor.set_framesize(sensor.QQVGA)  # use QQVGA for speed.
sensor.skip_frames(60)  # Let new settings take effect.
sensor.set_auto_whitebal(False)  # turn this off.
clock = time.clock()  # Tracks FPS.

#x_pid = PID(p=1, i=6,d=0.5, imax=100)
x_pid = PID(p=1, i=3, d=0.5, imax=100)
y_pid = PID(p=0.1, i=0.1, d=0.1, imax=40)
'''
red_threshold     = (46, 100, 40, 111, -11, 64)
#red_threshold     = (46, 77, 53, 111, -11, 64)
green_threshold   = (32, 92, -69, -34, -1, 39)
#green_threshold   = (32, 100, -77, -24, -24, 22)
blue_threshold    = (32, 77, -12, 27, -69, -25)
black_threshold   = (0, 25, -17, 15, -18, 15)
'''
red_threshold = (43, 74, 27, 77, -8, 50)
green_threshold = (48, 70, -78, -40, 35, 60)
#green_threshold   = (32, 100, -77, -24, -24, 22)
blue_threshold = (34, 100, -55, 3, -51, -6)
black_threshold = (0, 25, -17, 15, -18, 15)

#ball_threshold    =  red_threshold
ball_threshold = blue_threshold
Example #42
# Cartoon Filter
#
# This example shows off a simple cartoon filter on images. The cartoon
# filter works by joining similar pixel areas of an image and replacing
# the pixels in those areas with the area mean.

import sensor, image, time

sensor.reset()
sensor.set_pixformat(sensor.RGB565) # or GRAYSCALE...
sensor.set_framesize(sensor.QVGA) # or QQVGA...
sensor.skip_frames(time = 2000)
clock = time.clock()

while(True):
    clock.tick()

    # seed_threshold controls the maximum area growth of a colored
    # region. Making this larger will merge more pixels.

    # floating_threshold controls the maximum pixel-to-pixel difference
    # when growing a region. Setting this very high will quickly combine
    # all pixels in the image. You should keep this small.

    # cartoon() will grow regions while both thresholds are satisfied...

    img = sensor.snapshot().cartoon(seed_threshold=0.05, floating_thresholds=0.05)

    print(clock.fps())
Example #43
# Find Circles Example
#
# This example shows off how to find circles in the image using the Hough
# Transform. https://en.wikipedia.org/wiki/Circle_Hough_Transform
#
# Note that the find_circles() method will only find circles which are completely
# inside of the image. Circles which go outside of the image/roi are ignored...

import sensor, image, time

sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE) # grayscale is faster
sensor.set_framesize(sensor.QQVGA)
sensor.skip_frames(time = 2000)
clock = time.clock()

while(True):
    clock.tick()
    img = sensor.snapshot().lens_corr(1.8)

    # Circle objects have four values: x, y, r (radius), and magnitude. The
    # magnitude is the strength of the detection of the circle. Higher is
    # better...

    # `threshold` controls how many circles are found. Increase its value
    # to decrease the number of circles detected...

    # `x_margin`, `y_margin`, and `r_margin` control the merging of similar
    # circles in the x, y, and r (radius) directions.

    # r_min, r_max, and r_step control what radiuses of circles are tested.
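
    # The example is cut off here. A hedged completion mirroring the RGB565 copy of
    # this example earlier in this section, with the radius controls just described:
    for c in img.find_circles(threshold = 2000, x_margin = 10, y_margin = 10, r_margin = 10,
                              r_min = 2, r_max = 100, r_step = 2):
        img.draw_circle(c.x(), c.y(), c.r(), color = (255, 0, 0))
        print(c)

    print("FPS %f" % clock.fps())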
Example #44
# Find Rects Example
#
# This example shows off how to find rectangles in the image using the quad threshold
# detection code from our April Tags code. The quad threshold detection algorithm
# detects rectangles in an extremely robust way and is much better than Hough
# Transform based methods. For example, it can still detect rectangles even when lens
# distortion causes those rectangles to look bent. Rounded rectangles are no problem!
# (But, given this, the code will also detect small radius circles too)...

import sensor, image, time

sensor.reset()
sensor.set_pixformat(sensor.RGB565) # grayscale is faster (160x120 max on OpenMV-M7)
sensor.set_framesize(sensor.QQVGA)
sensor.skip_frames(time = 2000)
clock = time.clock()

while(True):
    clock.tick()
    img = sensor.snapshot()

    # `threshold` below should be set to a high enough value to filter out noise
    # rectangles detected in the image which have low edge magnitudes. Rectangles
    # have larger edge magnitudes the larger and more contrasty they are...

    for r in img.find_rects(threshold = 10000):
        img.draw_rectangle(r.rect(), color = (255, 0, 0))
        for p in r.corners(): img.draw_circle(p[0], p[1], 5, color = (0, 255, 0))
        print(r)

    print("FPS %f" % clock.fps())
Example #45
from pid import PID
from pyb import Servo

pan_servo = Servo(1)
tilt_servo = Servo(2)

red_threshold = (13, 49, 18, 61, 6, 47)

pan_pid = PID(p=0.07, i=0, imax=90)  # use this PID when running offline or with image streaming disabled
tilt_pid = PID(p=0.05, i=0, imax=90)  # use this PID when running offline or with image streaming disabled
#pan_pid = PID(p=0.1, i=0, imax=90)  # use this PID when tuning online with the IDE connected
#tilt_pid = PID(p=0.1, i=0, imax=90)  # use this PID when tuning online with the IDE connected

sensor.reset()  # Initialize the camera sensor.
sensor.set_pixformat(sensor.GRAYSCALE)  # use GRAYSCALE.
sensor.set_framesize(sensor.QVGA)  # use QVGA.
sensor.set_vflip(True)
sensor.skip_frames(10)  # Let new settings take effect.
sensor.set_auto_whitebal(False)  # turn this off.
clock = time.clock()  # Tracks FPS.
face_cascade = image.HaarCascade("frontalface", stages=25)


def find_max(blobs):
    max_size = 0
    for blob in blobs:
        if blob[2] * blob[3] > max_size:
            max_blob = blob
            max_size = blob[2] * blob[3]
    return max_blob
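
# A hedged usage sketch for find_max() (the original is cut off above); it follows
# the well-known OpenMV pan/tilt face-tracking pattern, not this script verbatim:
while (True):
    clock.tick()
    img = sensor.snapshot()
    faces = img.find_features(face_cascade, threshold=0.75, scale_factor=1.25)
    if faces:
        face = find_max(faces)  # largest detected face rect [x, y, w, h]
        img.draw_rectangle(face)
        pan_error = face[0] + face[2] / 2 - img.width() / 2
        tilt_error = face[1] + face[3] / 2 - img.height() / 2
        pan_servo.angle(pan_servo.angle() + pan_pid.get_pid(pan_error, 1) / 2)
        tilt_servo.angle(tilt_servo.angle() - tilt_pid.get_pid(tilt_error, 1))
    print(clock.fps())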
# CIFAR-10 Search Just Center Example
#
# CIFAR is a convolutional neural network designed to classify its field of view into several
# different object types and works on RGB video data.
#
# In this example we slide the LeNet detector window over the image and get a list of activations
# where there might be an object. Note that using a CNN with a sliding window is extremely compute
# expensive, so for an exhaustive search do not expect the CNN to be real-time.

import sensor, image, time, os, nn

sensor.reset()                         # Reset and initialize the sensor.
sensor.set_pixformat(sensor.RGB565)    # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.QVGA)      # Set frame size to QVGA (320x240)
sensor.set_windowing((128, 128))       # Set 128x128 window.
sensor.skip_frames(time=750)           # Don't let autogain run very long.
sensor.set_auto_gain(False)            # Turn off autogain.
sensor.set_auto_exposure(False)        # Turn off auto exposure.

# Load cifar10 network (You can get the network from OpenMV IDE).
net = nn.load('/cifar10.network')
# Faster, smaller and less accurate.
# net = nn.load('/cifar10_fast.network')
labels = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']

clock = time.clock()
while(True):
    clock.tick()

    img = sensor.snapshot()
Example #47
# Here we always choose the QVGA format (320x240) inside a VGA image.
# If this is changed, the camera has to be calibrated again;
# also, the logic of mask_height should be checked.
img_width = 320
img_height = 240
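
# The top of this script is cut off; minimal assumed setup so the fragment below
# parses (sensor_format here is a placeholder, not from the original):
import sensor
sensor_format = sensor.RGB565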

# additional data for the diff mask
if sensor_format == sensor.RGB565:
    mask_height = int(img_height / 8)
else:
    mask_height = int(img_height / 4)

# Initialize the camera sensor
sensor.reset()
sensor_size = sensor.VGA
sensor.set_pixformat(sensor_format)
sensor.set_framesize(sensor_size)
# use a QVGA image at the center of the VGA image
sensor.set_windowing((int(
    (sensor.width() - img_width) / 2), int(
        (sensor.height() - img_height) / 2), img_width, img_height))

# wait a bit and get a snapshot
sensor.skip_frames(time=2000)
sensor.snapshot()

#get the gains and exposure
gain_db = sensor.get_gain_db()
exposure_us = sensor.get_exposure_us()
print("exposure is " + str(exposure_us))
rgb_gain_db = sensor.get_rgb_gain_db()
import sensor, image, time

# NOTE!!! You have to use a small power of 2 resolution when using
# find_displacement(). This is because the algorithm is powered by
# something called phase correlation which does the image comparison
# using FFTs. A non-power of 2 resolution requires padding to a power
# of 2 which reduces the usefulness of the algorithm results. Please
# use a resolution like B128X128 or B128X64 (2x faster).

# Your OpenMV Cam supports power of 2 resolutions of 64x32, 64x64,
# 128x64, and 128x128. If you want a resolution of 32x32 you can create
# it by doing "img.pool(2, 2)" on a 64x64 image.

sensor.reset()                         # Reset and initialize the sensor.
sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE (or RGB565)
sensor.set_framesize(sensor.B128X128)  # Set frame size to 128x128... (or 128x64)...
sensor.skip_frames(time = 2000)        # Wait for settings take effect.
clock = time.clock()                   # Create a clock object to track the FPS.

# Take from the main frame buffer's RAM to allocate a second frame buffer.
# There's a lot more RAM in the frame buffer than in the MicroPython heap.
# However, after doing this you have a lot less RAM for some algorithms...
# So, be aware that it's a lot easier to run into out-of-RAM issues now.
extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.GRAYSCALE)
extra_fb.replace(sensor.snapshot())

while(True):
    clock.tick() # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot() # Take a picture and return the image.
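
    # The loop is cut off here. A hedged completion doing frame-to-frame phase
    # correlation, based on the official translation examples (assumes recent
    # firmware where find_displacement() returns a displacement object):
    displacement = extra_fb.find_displacement(img)
    extra_fb.replace(img)  # track motion frame-to-frame

    if displacement.response() > 0.1:  # low responses are usually noise
        print("{0:+f}x {1:+f}y {2} FPS".format(displacement.x_translation(),
                                               displacement.y_translation(),
                                               clock.fps()))
    else:
        print(clock.fps())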
Example #49
# Circle Drawing
#
# This example shows off drawing circles on the OpenMV Cam.

import sensor, image, time, pyb

sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)  # or RGB565...
sensor.set_framesize(sensor.QVGA)  # or QQVGA...
sensor.skip_frames(time=2000)
clock = time.clock()

while (True):
    clock.tick()

    img = sensor.snapshot()

    for i in range(10):
        x = (pyb.rng() % (2 * img.width())) - (img.width() // 2)
        y = (pyb.rng() % (2 * img.height())) - (img.height() // 2)
        radius = pyb.rng() % (max(img.height(), img.width()) // 2)

        r = (pyb.rng() % 127) + 128
        g = (pyb.rng() % 127) + 128
        b = (pyb.rng() % 127) + 128

        # If the first argument is a scalar then this method expects
        # to see x, y, and radius. Otherwise, it expects a (x,y,radius) tuple.
        img.draw_circle(x, y, radius, color=(r, g, b), thickness=2, fill=False)

    print(clock.fps())
#
# This example shows off single color code tracking using the OpenMV Cam.
#
# A color code is a blob composed of two or more colors. The example below will
# only track colored objects which have both the colors below in them.

import sensor, image, time, math

# Color Tracking Thresholds (L Min, L Max, A Min, A Max, B Min, B Max)
# The below thresholds track in general red/green things. You may wish to tune them...
thresholds = [(30, 100, 15, 127, 15, 127), # generic_red_thresholds -> index is 0 so code == (1 << 0)
              (30, 100, -64, -8, -32, 32)] # generic_green_thresholds -> index is 1 so code == (1 << 1)
# Codes are or'ed together when "merge=True" for "find_blobs".

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.skip_frames(time = 2000)
sensor.set_auto_gain(False) # must be turned off for color tracking
sensor.set_auto_whitebal(False) # must be turned off for color tracking
clock = time.clock()

# Only blobs with more pixels than "pixels_threshold" and more area than "area_threshold" are
# returned by "find_blobs" below. Change "pixels_threshold" and "area_threshold" if you change the
# camera resolution. "merge=True" must be set to merge overlapping color blobs for color codes.

while(True):
    clock.tick()
    img = sensor.snapshot()
    for blob in img.find_blobs(thresholds, pixels_threshold=100, area_threshold=100, merge=True):
        if blob.code() == 3: # r/g code == (1 << 1) | (1 << 0)
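            # The example is cut off here. A hedged completion: mark the blob that
            # contains both colors of the color code.
            img.draw_rectangle(blob.rect())
            img.draw_cross(blob.cx(), blob.cy())
    print(clock.fps())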
Example #51
from pyb import UART
uart = UART(3, 9600, timeout_char=1000)
# Reset sensor
led = pyb.LED(1)
led2 = pyb.LED(2)
led3 = pyb.LED(3)
sensor.reset()

# Set sensor settings
sensor.set_contrast(1)
sensor.set_gainceiling(16)
# Max resolution for template matching with SEARCH_EX is QQVGA
sensor.set_framesize(sensor.QQVGA)
# You can set windowing to reduce the search image.
#sensor.set_windowing(((640-80)//2, (480-60)//2, 80, 60))
sensor.set_pixformat(sensor.GRAYSCALE) # configure grayscale

# Load template.
# Template should be a small (eg. 32x32 pixels) grayscale image.
templateH = image.Image("/exampleH1.pgm") # open the H template file
templateS = image.Image("/exampleS1.pgm") # open the S template file
templateU = image.Image("/exampleU1.pgm") # open the U template file
tim = Timer(4, freq=1000) # Frequency in Hz
clock = time.clock()

# Run template matching
while (True):
    clock.tick()
    img = sensor.snapshot()
    r = img.find_template(templateH, 0.70, step=4, search=SEARCH_EX) #, roi=(10, 0, 60, 60))
    p = img.find_template(templateS, 0.70, step=4, search=SEARCH_EX) #, roi=(10, 0, 60, 60))