import sensor, pyb, math, time, image
from pyb import LED, Pin, Timer, UART

sensor.reset()  # initialize the camera sensor
sensor.set_pixformat(sensor.GRAYSCALE)  # pixel format is either RGB565 (color) or GRAYSCALE
sensor.set_framesize(sensor.QQVGA)
# Frame sizes: sensor.QQVGA: 160x120, sensor.QQVGA2: 128x160 (usually for the LCD
# shield), sensor.QVGA: 320x240, sensor.QQCIF: 88x72, sensor.QCIF: 176x144, sensor.CIF: 352x288
sensor.skip_frames(20)  # number of frames to skip while the settings stabilize (default 300)
# Indicator LEDs
LED_Red = pyb.LED(1)
LED_Green = pyb.LED(2)
LED_Blue = pyb.LED(3)
# Grayscale threshold for black blobs
black_threshold = [(0, 64)]
# x/y in-plane error
err_x = 0
err_y = 0
# Height
high = 0
scrc = 0
rebuf_temp = 0
# Counters
m = 0
error_sta = 0
timercnt = 0
timerflag = 0
# Data to send
uart_buf = bytearray([
    0x55, 0xAA, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xAA
])

Example #2
# NOTE!!! You have to use a small power of 2 resolution when using
# find_displacement(). This is because the algorithm is powered by
# something called phase correlation which does the image comparison
# using FFTs. A non-power of 2 resolution requires padding to a power
# of 2 which reduces the usefulness of the algorithm results. Please
# use a resolution like B64X64 or B64X32 (2x faster).

# Your OpenMV Cam supports power of 2 resolutions of 64x32, 64x64,
# 128x64, and 128x128. If you want a resolution of 32x32 you can create
# it by doing "img.pool(2, 2)" on a 64x64 image.

sensor.reset()  # Reset and initialize the sensor.
sensor.set_pixformat(
    sensor.GRAYSCALE)  # Set pixel format to GRAYSCALE (or RGB565)
sensor.set_framesize(sensor.B64X64)  # Set frame size to 64x64
sensor.skip_frames(time=2000)  # Wait for settings to take effect.
clock = time.clock()  # Create a clock object to track the FPS.

# Take from the main frame buffer's RAM to allocate a second frame buffer.
extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(),
                                 sensor.GRAYSCALE)
extra_fb.replace(sensor.snapshot())

while (True):
    clock.tick()  # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot()  # Take a picture and return the image.

    displacement = extra_fb.find_displacement(img)
    extra_fb.replace(img)

    # Offset results are noisy without filtering so we drop some accuracy.
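    # The loop body was truncated here. A minimal sketch of how it might
    # continue, following the stock OpenMV find_displacement example:
    # quantize the sub-pixel offsets and print them with the phase-correlation
    # quality metric.
    sub_pixel_x = int(displacement.x_translation() * 5) / 5.0
    sub_pixel_y = int(displacement.y_translation() * 5) / 5.0
    print("{0:+f}x {1:+f}y {2} {3} FPS".format(sub_pixel_x, sub_pixel_y,
                                               displacement.response(),
                                               clock.fps()))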
# QRCode Example
#
# This example shows the power of the OpenMV Cam to detect QR Codes
# using lens correction (see the qrcodes_with_lens_corr.py script for higher performance).

import sensor, image, time

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QQVGA)  # can be QQVGA on M7...
sensor.skip_frames(30)
sensor.set_auto_gain(False)  # must turn this off to prevent image washout...
clock = time.clock()

while (True):
    clock.tick()
    img = sensor.snapshot()
    img.lens_corr(1.8)  # strength of 1.8 is good for the 2.8mm lens.
    for code in img.find_qrcodes():
        print(code)
    print(clock.fps())
import sensor, pyb  # assumed imports; this fragment begins mid-script

blindness_array = [1000, 1500, 2000, 2500, 3000, 3500, 4000, 5000, 9999]

# set blindness_time manually
blindness_time = 3000

# flash light to signal camera activation
BLUE_LED_PIN = 3  # the blue LED index on OpenMV boards; this definition was missing from the excerpt
pyb.LED(BLUE_LED_PIN).on()

# print message to serial monitor
print("Testing blindness for: %4d ms" % blindness_time)

# initialize camera and define camera settings
sensor.reset()  # initialize the camera sensor
sensor.set_pixformat(sensor.RGB565)  # or sensor.GRAYSCALE
sensor.set_framesize(sensor.VGA)  # or sensor.QVGA (or others)
sensor.skip_frames(
    time=blindness_time)  # let new settings take effect ("be blind")

# print snapshot start message to serial monitor
print("Taking photo . . . ")

# actually take photo and save to SD card
sensor.snapshot().compressed(quality=75).save("blindness_%04d_ms.jpg" %
                                              blindness_time)

# print snapshot end message to serial monitor
print("Photo complete!")

# turn camera indicator off
pyb.LED(BLUE_LED_PIN).off()

# pause to allow time for processing
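pyb.delay(1000)  # hypothetical 1000 ms pause; the original delay value was not preserved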
# CIFAR-10 Search Just Center Example
#
# CIFAR is a convolutional neural network designed to classify its field of view into several
# different object types, and it works on RGB video data.
#
# In this example we slide the CIFAR-10 detector window over the image and get a list of activations
# where there might be an object. Note that using a CNN with a sliding window is extremely
# computationally expensive, so for an exhaustive search do not expect the CNN to be real-time.

import sensor, image, time, os, nn

sensor.reset()                         # Reset and initialize the sensor.
sensor.set_pixformat(sensor.RGB565)    # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.QVGA)      # Set frame size to QVGA (320x240)
sensor.set_windowing((128, 128))       # Set 128x128 window.
sensor.skip_frames(time=750)           # Don't let autogain run very long.
sensor.set_auto_gain(False)            # Turn off autogain.
sensor.set_auto_exposure(False)        # Turn off auto exposure.

# Load cifar10 network (You can get the network from OpenMV IDE).
net = nn.load('/cifar10.network')
# Faster, smaller and less accurate.
# net = nn.load('/cifar10_fast.network')
labels = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']

clock = time.clock()
while(True):
    clock.tick()

    img = sensor.snapshot()
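    # The example was truncated here. A hedged sketch of the search step,
    # modeled on the stock OpenMV nn search examples (the parameter values
    # are illustrative, not the author's):
    for obj in net.search(img, threshold=0.6, min_scale=0.5, scale_mul=0.731,
                          x_overlap=0.5, y_overlap=0.5, contrast_threshold=0.5):
        print("Detected %s - Confidence %f%%" % (labels[obj.index()], obj.value() * 100))
        img.draw_rectangle(obj.rect())
    print(clock.fps())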
Example #6
# Sensor Sleep Mode Example.
# This example demonstrates the sensor sleep mode. The sleep mode saves around
# 40mA when enabled and it's automatically cleared when calling sensor.reset().

import sensor, image, time

sensor.reset()                      # Reset and initialize the sensor.
sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.QVGA)   # Set frame size to QVGA (320x240)
sensor.skip_frames(time = 3000)     # Capture frames for 3000 ms.
sensor.sleep(True)                  # Enable sensor sleep mode (saves about 40mA).
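import sensor, time

# The green_threshold definition was lost when this fragment was excerpted.
# The LAB tuple below is a placeholder borrowed from the standard OpenMV
# color-tracking tutorials; tune it for your lighting.
green_threshold = (0, 80, -70, -10, 0, 30)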
# Green LAB threshold: the six values are the minima and maxima (minL, maxL,
# minA, maxA, minB, maxB). Pick the LAB values from the three histogram plots
# on the left side of the IDE. For a grayscale image only two numbers,
# (min, max), are needed.

# You may need to tweak the threshold above to track green things...
# Select an area in the framebuffer to copy the color settings from.

sensor.reset()  # initialize the sensor

sensor.set_pixformat(sensor.RGB565)  # use RGB565.
# The pixel format is either RGB565 (color) or GRAYSCALE.

sensor.set_framesize(sensor.QQVGA)  # use QQVGA for speed.
# Set the frame size.

sensor.skip_frames(10)  # let the new settings take effect.
sensor.set_auto_whitebal(False)  # turn this off.
# Disable white balance. It is on by default and must be off for color tracking.
clock = time.clock()  # track FPS

while (True):
    clock.tick()  # track elapsed milliseconds between snapshots().
    img = sensor.snapshot()  # take a picture and return the image.

    blobs = img.find_blobs([green_threshold])
    # find_blobs(thresholds, invert=False, roi=Auto): thresholds is a list of
    # color-threshold tuples, hence the [ ] brackets. invert=True inverts the
    # thresholds (default False). roi = (x, y, w, h) restricts the search to a
    # rectangle of width w and height h whose top-left corner is at (x, y);
    # if roi is omitted, the whole image is searched. The call returns a list
    # of blobs; for each blob, [0] is the top-left x, [1] the top-left y,
    # [2] the width, [3] the height, [4] the number of pixels in the blob.
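    # The loop body was truncated here. A minimal sketch of how the blobs
    # might be used, mirroring the marker-drawing example later in this
    # collection:
    if blobs:
        for b in blobs:
            img.draw_rectangle(b[0:4])  # bounding box (x, y, w, h)
            img.draw_cross(b[5], b[6])  # centroid (cx, cy)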
import sensor, image, time

# For color tracking to work really well you should ideally be in a very, very,
# very controlled environment where the lighting is constant. Additionally, if
# you want to track more than 2 colors you need to set the boundaries for them
# very narrowly. If you try to track... generally red, green, and blue then
# you will end up just tracking everything, which you don't want.
red_threshold   = (  40,   60,   60,   90,   50,   70)
blue_threshold  = (   0,   20,  -10,   30,  -60,   10)
# You may need to tweak the above settings for tracking red and blue things...
# Select an area in the Framebuffer to copy the color settings.

sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565) # use RGB565.
sensor.set_framesize(sensor.QQVGA) # use QQVGA for speed.
sensor.skip_frames(10) # Let new settings take effect.
sensor.set_whitebal(False) # turn this off.
clock = time.clock() # Tracks FPS.

while(True):
    clock.tick() # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot() # Take a picture and return the image.

    blobs = img.find_blobs([red_threshold, blue_threshold])
    merged_blobs = img.find_markers(blobs)
    if merged_blobs:
        for b in merged_blobs:
            # Draw a rect around the blob.
            img.draw_rectangle(b[0:4]) # rect
            img.draw_cross(b[5], b[6]) # cx, cy
            # Draw the color label. b[8] is the color label.
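            # The original line was cut off here. A hedged completion: render
            # the color label stored in b[8] next to the blob.
            img.draw_string(b[0], b[1] - 8, "%d" % b[8])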
import sensor, time, pyb  # assumed imports; this fragment begins mid-script

GreenLed = pyb.LED(2)
BlueLed = pyb.LED(3)
clock = time.clock()
usart = pyb.UART(3, 115200, timeout_char=100)
usart.init(115200,
           bits=8,
           parity=None,
           stop=1,
           timeout_char=1000,
           read_buf_len=64)

# Initialize the image sensor
sensor.reset()
sensor.set_pixformat(sensor.RGB565)  #sensor.GRAYSCALE -> grayscale (faster QR scanning)
sensor.set_framesize(sensor.QVGA)
sensor.skip_frames(50)
#sensor.set_contrast(2)              # contrast, range -3..3
sensor.set_brightness(1)  # brightness
sensor.set_auto_gain(False)  # disable auto gain
#sensor.set_auto_whitebal(False,(-5.243186, -6.02073, -2.498888))     # fixed white-balance gains measured under lab lighting
sensor.set_auto_whitebal(False, (-6.02073, -5.243186, -1.972561))
sensor.set_auto_exposure(False)  # disable auto exposure

# Blob thresholds
Light_threshold = (0, 52, -128, 127, -128, 127)  # binarization threshold for QR scanning
#Red_threshold = (23, 77, 18, 101, -16, 96)
Red_threshold = (6, 68, 36, 127, -3, 47)
Green_threshold = (29, 67, -91, -27, -3, 49)
Blue_threshold = (13, 50, -29, 44, -89, -18)
Target_threshold = [Red_threshold, Green_threshold, Blue_threshold]
Target_List = [None, None, None, None]  # filtered targets: the first three entries are blob lists, the fourth is a QR-code object (not a list)
Example #10
# Untitled - By: Gehaha - 周二 11月 20 2018

import sensor, image, pyb

RED_LED_PIN = 1
BLUE_LED_PIN = 3


sensor.reset()  # Initialize the camera sensor.
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.B128X128)
sensor.set_windowing((92,112))
sensor.skip_frames(10) # let new settings take effect
sensor.skip_frames(time = 2000)
num = 1 # subject index: the first person's photos are saved to folder s1, the second person's to s2, and so on
n = 20   # number of photos to take per person
while(n):
    # red LED on
    pyb.LED(RED_LED_PIN).on()
    sensor.skip_frames(time = 3000) # wait 3 seconds so the subject can get ready

    # red LED off, blue LED on
    pyb.LED(RED_LED_PIN).off()
    pyb.LED(BLUE_LED_PIN).on()

    # save the captured image to the SD card
    print(n)
    sensor.snapshot().save("singtown/s%s/%s.bmp" %(num,n))
    n -= 1
    pyb.LED(BLUE_LED_PIN).off()
    print("Done! Rest the camera to see the saved image")
Example #11
import sensor, time  # assumed imports; this fragment begins mid-script

#car_tail   = (1,100,20,70,68,90 ) #(0) tail (expected orange)
#car_tail   = (1,100,20,70,60,90 ) # tail (expected orange)
car_tail = (1, 100, 20, 70, 45, 90)  #(2)
car_head = (1, 100, -71, -30, 30, 80)  # head (green)
#car_head =  (1,100,-71,-30,40,80) # head (bright green)
target = (1, 100, 28, 90, -60, 20)  # original beacon light
#target = (1,100,28,90,10,20 )
target_miss = (35, 100, -16, 15, -10, 44)
#target_miss =(1,100,-22,61,-48,51)

sensor.set_colorbar(True)
sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.CIF)
sensor.set_windowing((240, 240))  # 240x240 center pixels of the CIF frame
sensor.skip_frames(2)  # let a couple of frames pass so the settings settle
sensor.set_auto_gain(False)  # must be turned off for color tracking
sensor.set_auto_whitebal(
    False)  # must be turned off for color tracking
clock = time.clock()

# Only blobs that with more pixels than "pixel_threshold" and more area than "area_threshold" are
# returned by "find_blobs" below. Change "pixels_threshold" and "area_threshold" if you change the
# camera resolution. "merge=True" merges all overlapping blobs in the image.
car = 0
angle_float = 0  # stores the measured angle
angle_char_send = 0  # maps the angle to the grade letters A, B, C, D, ...
x0 = 0  # stores the position of the car's tail
Example #12
# Note, this script works well assuming most parts of the image do not
# pass the thresholding test... otherwise, you don't get a distance
# benefit.

import sensor, image, time, math, omv

# Set the thresholds to find a white object (i.e. tag border)
thresholds = (150, 255)

sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
if omv.board_type() == "H7": sensor.set_framesize(sensor.VGA)
elif omv.board_type() == "M7": sensor.set_framesize(sensor.QVGA)
else: raise Exception("You need a more powerful OpenMV Cam to run this script")
sensor.skip_frames(time = 200) # increase this to let the auto methods run for longer
sensor.set_auto_gain(False) # must be turned off for color tracking
sensor.set_auto_whitebal(False) # must be turned off for color tracking
clock = time.clock()

# The apriltag code supports up to 6 tag families which can be processed at the same time.
# Returned tag objects will have their tag family and id within the tag family.
tag_families = 0
tag_families |= image.TAG16H5 # comment out to disable this family
tag_families |= image.TAG25H7 # comment out to disable this family
tag_families |= image.TAG25H9 # comment out to disable this family
tag_families |= image.TAG36H10 # comment out to disable this family
tag_families |= image.TAG36H11 # comment out to disable this family (default family)
tag_families |= image.ARTOOLKIT # comment out to disable this family

while(True):
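    # The loop body was truncated here. A hedged, simplified sketch: the
    # original script first thresholds for bright regions and then runs the
    # tag detector on those ROIs, while this version just scans the whole frame.
    clock.tick()
    img = sensor.snapshot()
    for tag in img.find_apriltags(families=tag_families):
        img.draw_rectangle(tag.rect(), color=127)
        print("Tag family %s, id %d" % (str(tag.family()), tag.id()))
    print(clock.fps())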
#Note - code has been built for retroreflective tape
#4189
import sensor, image, time, pyb

sensor.reset()                      # Reset and initialize the sensor.
sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE (or RGB565)
sensor.set_framesize(sensor.QVGA)   # Set frame size to QVGA (320x240)
sensor.skip_frames(time = 2000)     # Wait for settings to take effect.
sensor.set_contrast(3)              # settings to increase the difference between the retroreflective tape and the background
sensor.set_brightness(-3)
sensor.set_saturation(3)

threshold = (100, 38, 4, -13, 10, -28) # Thresholds for IR Tape when differenced

led = pyb.LED(4) # IR LEDs

extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.GRAYSCALE)
pixels = 0 # variable for storing the central x value of the latest blob
while(True):
    #stores one frame with the IR LEDs on and one with them off. Differencing the two frames
    #removes almost all of the background noise, leaving only the tape visible
    led.on()
    extra_fb.replace(sensor.snapshot())
    led.off()
    img = sensor.snapshot()
    img = img.difference(extra_fb)
    img = img.mean(3) #blurs the image to further remove background noise
    for blob in img.find_blobs([threshold], pixels_threshold=100, area_threshold=100, merge=True, margin=10):
        img.draw_rectangle(blob.rect())
        pixels = blob.cx()
    print(pixels)
Example #14
# Hello CorgiDude | From AiDude.io, aiiotshop.com/p/58
import sensor, image, lcd, time
sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
#sensor.set_windowing((224, 224))
sensor.skip_frames(time=100)
sensor.set_vflip(1)
lcd.init()
lcd.rotation(2)
timep = 0
while (True):
    #calculate fps
    fps = 1000 / (time.ticks_ms() - timep)
    timep = time.ticks_ms()
    #display image on screen
    img = sensor.snapshot()
    img = img.cut(0, 0, 240, 240)
    img.draw_string(5, 5, "fps = %2.1f" % fps, color=(0, 255, 0), scale=2)
    lcd.display(img)
Example #17
from machine import SPI
from fpioa_manager import fm, board_info
from machine import UART
import utime

import sensor, lcd

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.B128X128)
#sensor.set_framesize(sensor.QVGA)
sensor.run(1)
sensor.skip_frames()
lcd.init()

fm.register(board_info.PIN9,fm.fpioa.UART2_TX)
fm.register(board_info.PIN10,fm.fpioa.UART2_RX)
uart_B = UART(UART.UART2, 115200, 8, None, 1, timeout=1000, read_buf_len=4096)
spi1 = SPI(SPI.SPI1, mode=SPI.MODE_MASTER, baudrate=5000000, polarity=0 , phase=0, bits=8, firstbit=SPI.MSB, sck=6, mosi=7, miso=30, cs0=8)

def colorTo565(r, g, b):
    val = ((r & 0xf8) << 8) + ((g & 0xfc) << 3 ) + (b >> 3)

    return val & 0xff, val >> 8

def fill(r, g, b):
    l, m =  colorTo565(r, g, b)
    print(l,m)
    da = bytearray([0xff] * 128 * 128 * 2)
    for i in range(128 * 128):
        da[2 * i] = m
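        da[2 * i + 1] = l
    # The function body was truncated here. A hedged completion: push the
    # filled framebuffer to the display over SPI (spi1 and its chip-select
    # were configured above; extra arguments may be needed on some ports).
    spi1.write(da)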
# This example demonstrates edge detection with the morph() function: kernel-based edge
# detection and then thresholding and filtering that image afterwards.

import sensor, image, time

kernel_size = 1 # kernel width = (size*2)+1, kernel height = (size*2)+1
kernel = [-1, -1, -1,\
          -1, +8, -1,\
          -1, -1, -1]
# This is a high pass filter kernel. see here for more kernels:
# http://www.fmwconcepts.com/imagemagick/digital_image_filtering.pdf
thresholds = [(100, 255)] # grayscale thresholds

sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
sensor.skip_frames(time = 2000) # Let new settings take effect.
clock = time.clock() # Tracks FPS.

# On the OV7725 sensor, edge detection can be enhanced
# significantly by setting the sharpness/edge registers.
# Note: This will be implemented as a function later.
if (sensor.get_id() == sensor.OV7725):
    sensor.__write_reg(0xAC, 0xDF)
    sensor.__write_reg(0x8F, 0xFF)

while(True):
    clock.tick() # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot() # Take a picture and return the image.

    img.morph(kernel_size, kernel)
    img.binary(thresholds)
Example #19
import sensor, time
from pyb import Pin, LED  # assumed imports; this fragment begins mid-script

green_threshold = (0, 100, -20, -7, 10, 41)
green_threshold = (0, 100, -27, -2, 36, 60)
blue_threshold = (11, 69, -13, 0, -14, -1)
blue_threshold = (0, 69, -13, -4, -17, -3)
blue_threshold = (0, 53, -13, -4, -18, 1)
blue_threshold = (0, 53, -13, -2, -24, -2)
blue_threshold = (0, 50, -12, -1, -18, 3)
yellow_threshold = (0, 72, -10, 1, 11, 42)
# Green LAB threshold: the values are the minima and maxima (minL, maxL, minA,
# maxA, minB, maxB). Pick the LAB values from the three histogram plots on the
# left side of the IDE. For a grayscale image only (min, max) are needed.

sensor.reset()  # initialize the camera
sensor.set_pixformat(sensor.RGB565)  # RGB565 format.
sensor.set_framesize(sensor.QQVGA)  # QQVGA is a bit faster
sensor.skip_frames(10)  # skip 10 frames so the new settings take effect
sensor.set_auto_whitebal(False)
# Ask Mr. Wu: what is the template?

# Disable white balance. It is on by default and must be off for color recognition.
clock = time.clock()  # track FPS
turn_threshold = 15  # rotate threshold
turn = Pin('P0', Pin.OUT_PP)
turnDone = Pin('P1', Pin.OUT_PP)
red = LED(1)
green = LED(2)
blue = LED(3)
blue.on()
time.sleep(2)
blue.off()
Example #20
"""
Important:
    This script should be copied to the OpenMV Cam as `main.py`.

Source:
    https://github.com/openmv/openmv/blob/master/scripts/examples/02-Board-Control/usb_vcp.py

"""
import sensor
import ustruct
import pyb

usb_vcp = pyb.USB_VCP()
# Disable USB interrupt (CTRL-C by default) when sending raw data (i.e. images)
# See: https://docs.openmv.io/library/pyb.USB_VCP.html#pyb.USB_VCP.setinterrupt
usb_vcp.setinterrupt(-1)

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.VGA)
sensor.skip_frames(time=2000)  # wait for settings to take effect!

while True:

    command = usb_vcp.recv(4, timeout=5000)

    if command == b'snap':
        image = sensor.snapshot().compress()
        usb_vcp.send(ustruct.pack('<L', image.size()))
        usb_vcp.send(image)
Example #21
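import sensor, time

# The ROIS and threshold definitions were lost when this fragment was
# excerpted. The placeholder values below follow the stock OpenMV
# line-following example: each ROI is (x, y, w, h, weight) on a QQVGA
# (160x120) frame.
thresholds = [(0, 64)]  # grayscale threshold for the dark line
threshold_index = 0
ROIS = [(0, 100, 160, 20, 0.7),
        (0,  50, 160, 20, 0.3),
        (0,   0, 160, 20, 0.1)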
       ]


# Compute the weight divisor (we're computing this so you don't have to make weights add to 1).
weight_sum = 0
for r in ROIS: weight_sum += r[4] # r[4] is the roi weight.

# Camera setup...
clock = time.clock() # Tracks FPS.
sensor.reset() # Initialize the camera sensor.
sensor.__write_reg(0x6B, 0x22)  # switches camera into advanced calibration mode. See this for more: http://forums.openmv.io/viewtopic.php?p=1358#p1358
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QQVGA) # use QQVGA for speed.
sensor.set_auto_gain(True)    # do some calibration at the start
sensor.set_auto_whitebal(True)
sensor.skip_frames(60) # Let new settings take effect.
sensor.set_auto_gain(False)   # now turn off autocalibration before we start color tracking
sensor.set_auto_whitebal(False)



while(True):
    clock.tick() # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot() # Take a picture and return the image.
    centroid_sum = 0
    for r in ROIS:
        blobs = img.find_blobs([thresholds[threshold_index]], roi=r[0:4], merge=True) # r[0:4] is roi tuple.
        if blobs:
            # Find the index of the blob with the most pixels.
            most_pixels = 0
            largest_blob = 0
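            # The loop body was truncated here. A hedged completion, following
            # the stock line-following example: pick the blob with the most
            # pixels and weight its centroid by the ROI weight.
            for i in range(len(blobs)):
                if blobs[i].pixels() > most_pixels:
                    most_pixels = blobs[i].pixels()
                    largest_blob = i
            img.draw_rectangle(blobs[largest_blob].rect())
            centroid_sum += blobs[largest_blob].cx() * r[4]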
import sensor, time, lcd
from pyb import UART  # assumed imports; this fragment begins mid-script

black_threshold = [(0, 64)]
# x/y in-plane error
err_x = 0
err_y = 0
# Data to send
uart_buf = bytearray([0x55, 0xAA, 0x00, 0x00, 0x00, 0x00, 0xAA])

# UART 3 configuration
uart = UART(3, 115200)
uart.init(115200, bits=8, parity=None, stop=1)

sensor.reset()  # Initialize the camera sensor.
sensor.set_pixformat(sensor.GRAYSCALE)  # or sensor.RGB565
sensor.set_framesize(
    sensor.QQVGA2)  # Special 128x160 framesize for LCD Shield.
sensor.skip_frames(20)  # let the camera settle for a few frames
#sensor.set_auto_whitebal(False)  # disable white balance
clock = time.clock()  # start the clock

lcd.init()
while (True):
    clock.tick()
    img = sensor.snapshot()

    # find blobs
    blobs = img.find_blobs(black_threshold)
    if blobs:
        most_pixels = 0
        largest_blob = 0
        for i in range(len(blobs)):
            # more than one blob may be found in the target region; keep the largest one
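            # The loop body was truncated here; a hedged completion matching
            # the largest-blob pattern used elsewhere in this collection:
            if blobs[i].pixels() > most_pixels:
                most_pixels = blobs[i].pixels()
                largest_blob = i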
Example #23
# Movement Detection
# cansik @ bildspur 2018

import sensor, image, pyb, os, time, utime, math

sensor.reset()  # Reset and initialize the sensor.
sensor.set_pixformat(
    sensor.GRAYSCALE)  # Set pixel format to GRAYSCALE (or RGB565)
sensor.set_framesize(sensor.QVGA)  # Set frame size to QVGA (320x240)
sensor.skip_frames(time=500)  # Wait for settings to take effect.
sensor.set_auto_whitebal(True)
clock = time.clock()  # Create a clock object to track the FPS.

# windowing
window_width = 320  # max 320
window_height = 150  # max 240
roi = ((sensor.width() / 2) - (window_width / 2),
       (sensor.height() / 2) - (window_height / 2), window_width,
       window_height)
sensor.set_windowing((int(roi[0]), int(roi[1]), int(roi[2]), int(roi[3])))

# positional settings
sensor.set_hmirror(True)
sensor.set_vflip(False)

# variables
extra_fb = sensor.alloc_extra_fb(window_width, window_height, sensor.GRAYSCALE)
uart = pyb.UART(3, 9600, timeout_char=1000)

red_led = pyb.LED(1)
green_led = pyb.LED(2)
Example #24
# Basic Frame Differencing Example
#
# Note: You will need an SD card to run this example.
#
# This example demonstrates using frame differencing with your OpenMV Cam. It's
# called basic frame differencing because there's no background image update.
# So, as time passes the background image may change resulting in issues.

import sensor, image, pyb, os, time

sensor.reset()  # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565)  # or sensor.GRAYSCALE
sensor.set_framesize(sensor.QVGA)  # or sensor.QQVGA (or others)
sensor.skip_frames(10)  # Let new settings take effect.
sensor.set_whitebal(False)  # Turn off white balance.
clock = time.clock()  # Tracks FPS.

if not "temp" in os.listdir(): os.mkdir("temp")  # Make a temp directory

print("About to save background image...")
sensor.skip_frames(60)  # Give the user time to get ready.
sensor.snapshot().save("temp/bg.bmp")
print("Saved background image - Now frame differencing!")

while (True):
    clock.tick()  # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot()  # Take a picture and return the image.

    # Replace the image with the "abs(NEW-OLD)" frame difference.
    img.difference("temp/bg.bmp")
Example #25
import sensor, image, time

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.skip_frames(10)
clock = time.clock()

while (True):
    clock.tick()
    img = sensor.snapshot()
    print(clock.fps())
    img.draw_string(10, 10, "missing main.py", 0)
# We're using the robust=True argument for get_regression() in this script which
# computes the linear regression using a much more robust algorithm... but potentially
# much slower. The robust algorithm runs in O(N^2) time on the image. So, YOU NEED
# TO LIMIT THE NUMBER OF PIXELS the robust algorithm works on or it can actually
# take seconds for the algorithm to give you a result... THRESHOLD VERY CAREFULLY!

THRESHOLD = (0, 100) # Grayscale threshold for dark things...
BINARY_VISIBLE = True # Does binary first so you can see what the linear regression
                      # is being run on... might lower FPS though.

import sensor, image, time

sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.QQQVGA) # 80x60 (4,800 pixels) - O(N^2) max = 23,040,000.
sensor.skip_frames(time = 2000)     # WARNING: If you use QQVGA it may take seconds
clock = time.clock()                # to process a frame sometimes.

while(True):
    clock.tick()
    img = sensor.snapshot().binary([THRESHOLD]) if BINARY_VISIBLE else sensor.snapshot()

    # Returns a line object similar to line objects returned by find_lines() and
    # find_line_segments(). You have x1(), y1(), x2(), y2(), length(),
    # theta() (rotation in degrees), rho(), and magnitude().
    #
    # magnitude() represents how well the linear regression worked. It means something
    # different for the robust linear regression. In general, the larger the value the
    # better...
    line = img.get_regression([(255,255) if BINARY_VISIBLE else THRESHOLD], robust = True)
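    # The example was truncated here. A hedged completion from the stock
    # linear-regression example: draw the line (if any) and report its quality.
    if line: img.draw_line(line.line(), color=127)
    print("FPS %f, mag = %s" % (clock.fps(), str(line.magnitude()) if line else "N/A"))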
            else:
                h = (((b - r) / df) + 2) % 6

            if mx == 0:
                s = 0
            else:
                s = df / mx

    return h, s, mx  # output ranges: h in 0..6, s in 0..1, v in 0..255 (a lightly transformed HSV)


micropython.opt_level(3)  # Set maximum optimization level
sensor.reset()
sensor.set_pixformat(sensor.RGB565)  # Set pixel format to RGB
sensor.set_framesize(sensor.QVGA)  # QVGA balances quality and speed
sensor.skip_frames(time=3000)  # give the auto algorithms time to settle
sensor.set_auto_gain(False)  # Disable auto gain control
sensor.set_auto_whitebal(False)  # Disable white balance
sensor.set_auto_exposure(False)  # Disable auto exposure correction

W = sensor.width()  # Get width of camera
H = sensor.height()  # Get height of camera

lcx = int(W / 2)
lcy = int(H / 2)
last_height = 20
last_width = 120
w_step = last_width / 4
h_step = last_height / 4

tt = time.ticks()
Beispiel #28
0
import sensor, image, time


red_threshold  = (13, 49, 18, 61, 6, 47)
# L is lightness; positive a* means red, negative green; positive b* means yellow, negative blue -- (minL, maxL, minA, maxA, minB, maxB)
red_threshold  = [(13, 49, 18, 61, 6, 47),(10, 50, 30, 10, 5, 40),(0, 38, 50, 14, 10, 30)]


sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565) # use RGB565.
sensor.set_framesize(sensor.HQVGA) # use HQVGA for speed.
sensor.skip_frames(10) # Let new settings take effect.
sensor.set_auto_whitebal(False) # turn this off.
clock = time.clock() # Tracks FPS.

def find_max(blobs):
    max_size=0
    for blob in blobs:
        if blob[2]*blob[3] > max_size:
            max_blob=blob
            max_size = blob[2]*blob[3]
    return max_blob


while(True):
    clock.tick() # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot() # Take a picture and return the image.

    blobs = img.find_blobs(red_threshold)  # red_threshold is already a list of threshold tuples
    if blobs:
        max_blob = find_max(blobs)
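        # The example was truncated here. A hedged completion: mark the
        # largest red blob, as in the other tracking examples above.
        img.draw_rectangle(max_blob[0:4])
        img.draw_cross(max_blob[5], max_blob[6])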
Example #29
import sensor, image, time
from pyb import LED
from pyb import UART, Timer
import math
uart = UART(3, 115200)  # initialize the UART at 115200 baud
sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QQVGA)  # 80x60 (4,800 pixels)
sensor.skip_frames(time=2000)  # WARNING: If you use QQVGA it may take seconds
sensor.set_auto_gain(True)  # must be turned off for color tracking
sensor.set_auto_whitebal(True)  # must be turned off for color tracking
clock = time.clock()  # to process a frame sometimes.

up_roi = [5, 0, 70, 5]  # upper sampling ROI (previously [0, 0, 80, 15])
down_roi = [5, 55, 70, 5]  # lower sampling ROI (previously [0, 55, 80, 15])
mid_roi = [5, 5, 70, 50]  # central horizontal sampling ROI (previously [15, 15, 50, 30])
left_roi = [0, 0, 5, 60]  # left sampling ROI (previously [0, 0, 25, 60])
righ_roi = [75, 0, 5, 60]  # right sampling ROI (previously [55, 0, 25, 40])
thresholds = [(30, 0, -128, 127, -128, 127)]  # color threshold for black objects; adjust to your environment
THRESHOLD = (138, 186)  # Grayscale threshold for dark things...


class Dot(object):
    x = 0
    y = 0
    pixels = 0
    num = 0
    ok = 0
    flag = 0

# Perspective Correction
#
# This example shows off how to use the rotation_corr() to fix perspective
# issues related to how your OpenMV Cam is mounted.

import sensor, image, time

sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.QVGA)
sensor.skip_frames(time = 2000)
clock = time.clock()

# The image will be warped such that the following points become the new:
#
#   (0,   0)
#   (w-1, 0)
#   (w-1, h-1)
#   (0,   h-1)
#
# Try setting the points below to the corners of a quadrilateral
# (in clock-wise order) in the field-of-view. You can get points
# on the image by clicking and dragging on the frame buffer and
# recording the values shown in the histogram widget.

w = sensor.width()
h = sensor.height()

TARGET_POINTS = [(0,   0),   # (x, y) CHANGE ME!
                 (w-1, 0),   # (x, y) CHANGE ME!
                 (w-1, h-1), # (x, y) CHANGE ME!
                 (0,   h-1)] # (x, y) CHANGE ME!
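# The example was truncated here. A hedged sketch of the main loop from the
# stock perspective-correction example: warp each frame so that TARGET_POINTS
# become the new image corners.
while(True):
    clock.tick()
    img = sensor.snapshot().rotation_corr(corners=TARGET_POINTS)
    print(clock.fps())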
# NOTE: This example only works on the OpenMV Cam H7 Pro (that has SDRAM) and better!
# To get the models please see the CNN Network library in OpenMV IDE under
# Tools -> Machine Vision. The labels are there too.
#
# In this example we slide the detector window over the image and get a list
# of activations. Note that using a CNN with a sliding window is extremely
# computationally expensive, so for an exhaustive search do not expect the CNN to be real-time.

import sensor, image, time, os, tf

sensor.reset()  # Reset and initialize the sensor.
sensor.set_pixformat(
    sensor.RGB565)  # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.QVGA)  # Set frame size to QVGA (320x240)
sensor.set_windowing((240, 240))  # Set 240x240 window.
sensor.skip_frames(time=2000)  # Let the camera adjust.

mobilenet_version = "1"  # 1
mobilenet_width = "0.5"  # 1.0, 0.75, 0.50, 0.25
mobilenet_resolution = "128"  # 224, 192, 160, 128

mobilenet = "mobilenet_v%s_%s_%s_quant.tflite" % (
    mobilenet_version, mobilenet_width, mobilenet_resolution)
labels = [line.rstrip('\n') for line in open("mobilenet_labels.txt")]

clock = time.clock()
while (True):
    clock.tick()

    img = sensor.snapshot()
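    # The example was truncated here. A hedged sketch of the classification
    # step, modeled on the stock OpenMV tf_mobilenet examples:
    for obj in tf.classify(mobilenet, img, min_scale=1.0, scale_mul=0.5,
                           x_overlap=0.0, y_overlap=0.0):
        print("**********\nDetections at [x=%d,y=%d,w=%d,h=%d]" % obj.rect())
        sorted_list = sorted(zip(labels, obj.output()), key=lambda x: x[1], reverse=True)
        for i in range(5):
            print("%s = %f" % (sorted_list[i][0], sorted_list[i][1]))
    print(clock.fps())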
Example #32
# LeNet Example
import sensor, image, time, os, nn

sensor.reset()                         # Reset and initialize the sensor.
sensor.set_contrast(3)
sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE (or RGB565)
sensor.set_framesize(sensor.QVGA)      # Set frame size to QVGA (320x240)
sensor.set_windowing((128, 128))       # Set 128x128 window.
sensor.skip_frames(time=100)
sensor.set_auto_gain(False)
sensor.set_auto_exposure(False)

# Load lenet network
net = nn.load('/lenet.network')
labels = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']

clock = time.clock()                # Create a clock object to track the FPS.
while(True):
    clock.tick()                    # Update the FPS clock.
    img = sensor.snapshot()         # Take a picture and return the image.
    out = net.forward(img.copy().binary([(150, 255)], invert=True))
    max_idx = out.index(max(out))
    score = int(out[max_idx]*100)
    if (score < 70):
        score_str = "??:??%"
    else:
        score_str = "%s:%d%% "%(labels[max_idx], score)
    img.draw_string(0, 0, score_str)

    print(clock.fps())             # Note: OpenMV Cam runs about half as fast when connected
                                   # to the IDE. The FPS should increase once disconnected.
Example #33
sensor.reset() # Initialize the camera sensor.

###
# sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.GRAYSCALE
# sensor.set_framesize(sensor.B128X128) # or sensor.QQVGA (or others)
# sensor.set_windowing((92,112))

###
# Sensor settings
sensor.set_contrast(3)
sensor.set_gainceiling(16)
# HQVGA and GRAYSCALE are the best for face tracking.
sensor.set_framesize(sensor.HQVGA)
sensor.set_pixformat(sensor.GRAYSCALE)

sensor.skip_frames(10) # Let new settings take effect.
sensor.skip_frames(time = 2000)

num = 2 # subject index: the first person's photos are saved to folder s1, the second person's to s2, and so on. Change num for each new subject.

n = 10 # number of photos to take per person.

# Load Haar Cascade
# By default this will use all stages; fewer stages is faster but less accurate.
face_cascade = image.HaarCascade("frontalface", stages=200)
print(face_cascade)
def facsTest(img, thresholdSize=9000):
    # Find objects.
    # Note: Lower scale factor scales-down the image more and detects smaller objects.
    # Higher threshold results in a higher detection rate, with more false positives.
    objects = img.find_features(face_cascade, threshold=0.75, scale_factor=1.25)
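    # The function body was truncated here. A hedged completion: report a
    # detection when any found face rectangle exceeds thresholdSize pixels.
    for r in objects:
        if r[2] * r[3] > thresholdSize:
            return True
    return False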
Example #34
import sensor, image, time

# Setup Camera
sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QQVGA)
sensor.skip_frames(10)
threshold = (10, 90, -80, -30, 20, 50)
clock = time.clock()

# Find blobs
while(True):
    clock.tick()
    img = sensor.snapshot()
    for b in img.find_blobs([threshold]):
        img.draw_rectangle(b[0:4])
        print("====\nBlob %s" % str(b))
    print("FPS %d" % clock.fps())
Example #35
        return self.out


KfpX = Kalman(0.2, 0, 0, 0, 1, 0.543)

KfpY = Kalman(0.2, 0, 0, 0, 1, 0.543)

pid_x = PID(100, 0, 0, 0.7, 0.2, 0.6, 0, 0, 0)

pid_y = PID(100, 0, 0, -0.7, -0.2, 0.6, 0, 0, 0)

sensor.reset()  # initialize the camera
sensor.set_pixformat(sensor.RGB565)  # RGB565 format.
sensor.set_framesize(sensor.QVGA)  # QVGA frame size
sensor.set_auto_exposure(False, 35000)  # fixed exposure time
sensor.skip_frames(time=2000)  # skip frames for 2000 ms so the new settings take effect and white balance settles
sensor.set_auto_gain(False)  # disable auto gain
sensor.set_auto_whitebal(False)  # disable white balance; it is on by default and must be off for color recognition
uart = UART(3, 115200)

control_val_x = 1350  # initial gimbal x position
control_val_y = 1100  # initial gimbal y position

MIN_LIMIT_x = 1000  # gimbal x minimum
MAX_LIMIT_x = 1700  # gimbal x maximum

MIN_LIMIT_y = 950  # gimbal y minimum
MAX_LIMIT_y = 1250  # gimbal y maximum

ans_x = 146  # x coordinate of the projectile impact point in the image
ans_y = 129  # y coordinate of the projectile impact point in the image
Example #36
# Untitled - By: kutenai - Wed Aug 10 2016

import sensor

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.skip_frames()

while(True):
    img = sensor.snapshot()

Example #37
# Edge detection with Canny:
#
# This example demonstrates the Canny edge detector.
import sensor, image, time

sensor.reset()  # Initialize the camera sensor.
sensor.set_pixformat(sensor.GRAYSCALE)  # or sensor.RGB565
sensor.set_framesize(sensor.QQVGA)  # or sensor.QVGA (or others)
sensor.skip_frames(time=2000)  # Let new settings take effect.
sensor.set_gainceiling(8)
sensor.run(1)

clock = time.clock()  # Tracks FPS.
while (True):
    clock.tick()  # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot()  # Take a picture and return the image.
    # Use Canny edge detector
    #img.find_edges(image.EDGE_CANNY, threshold=(50, 80))
    # Faster simpler edge detection
    img.find_edges(image.EDGE_SIMPLE, threshold=(100, 255))
    print(clock.fps())  # Note: Your OpenMV Cam runs about half as fast while
                        # connected to the IDE. The FPS should increase once disconnected.
# Basic Frame Differencing Example
#
# Note: You will need an SD card to run this example.
#
# This example demonstrates using frame differencing with your OpenMV Cam. It's
# called basic frame differencing because there's no background image update.
# So, as time passes the background image may change resulting in issues.

import sensor, image, pyb, os, time

sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
sensor.skip_frames(time = 2000) # Let new settings take effect.
sensor.set_auto_whitebal(False) # Turn off white balance.
clock = time.clock() # Tracks FPS.

if not "temp" in os.listdir(): os.mkdir("temp") # Make a temp directory

print("About to save background image...")
sensor.skip_frames(time = 2000) # Give the user time to get ready.
sensor.snapshot().save("temp/bg.bmp")
print("Saved background image - Now frame differencing!")

while(True):
    clock.tick() # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot() # Take a picture and return the image.

    # Replace the image with the "abs(NEW-OLD)" frame difference.
    img.difference("temp/bg.bmp")
Example #39
# Automatic Grayscale Color Tracking Example
#
# This example shows off single color automatic grayscale color tracking using the OpenMV Cam.

import sensor, image, time
print("Letting auto algorithms run. Don't put anything in front of the camera!")

sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.QVGA)
sensor.skip_frames(60)
sensor.set_auto_gain(False) # must be turned off for color tracking
sensor.set_auto_whitebal(False) # must be turned off for color tracking
clock = time.clock()

# Capture the color thresholds for whatever was in the center of the image.
r = [(320//2)-(50//2), (240//2)-(50//2), 50, 50] # 50x50 center of QVGA.

print("Auto algorithms done. Hold the object you want to track in front of the camera in the box.")
print("MAKE SURE THE COLOR OF THE OBJECT YOU WANT TO TRACK IS FULLY ENCLOSED BY THE BOX!")
for i in range(60):
    img = sensor.snapshot()
    img.draw_rectangle(r)

print("Learning thresholds...")
threshold = [128, 128] # Middle grayscale values.
for i in range(60):
    img = sensor.snapshot()
    hist = img.get_histogram(roi=r)
    lo = hist.get_percentile(0.01) # Get the CDF of the histogram at the 1% range (ADJUST AS NECESSARY)!
    hi = hist.get_percentile(0.99) # Get the CDF of the histogram at the 99% range (ADJUST AS NECESSARY)!
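    # The example was truncated here. A hedged completion from the stock
    # automatic grayscale color-tracking example: blend the percentile values
    # into the running threshold.
    threshold[0] = (threshold[0] + lo.value()) // 2
    threshold[1] = (threshold[1] + hi.value()) // 2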
# NOTE!!! You have to use a small power of 2 resolution when using
# find_displacement(). This is because the algorithm is powered by
# something called phase correlation which does the image comparison
# using FFTs. A non-power of 2 resolution requires padding to a power
# of 2 which reduces the usefulness of the algorithm results. Please
# use a resolution like B128X128 or B128X64 (2x faster).

# Your OpenMV Cam supports power of 2 resolutions of 64x32, 64x64,
# 128x64, and 128x128. If you want a resolution of 32x32 you can create
# it by doing "img.pool(2, 2)" on a 64x64 image.
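
# BLOCK_W and BLOCK_H were not preserved in this excerpt; the stock
# image-patch example uses 16x16 blocks (powers of 2), assumed here.
BLOCK_W = 16 # pow2
BLOCK_H = 16 # pow2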

sensor.reset()                         # Reset and initialize the sensor.
sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE (or RGB565)
sensor.set_framesize(sensor.B128X128)  # Set frame size to 128x128... (or 128x64)...
sensor.skip_frames(time = 2000)        # Wait for settings to take effect.
clock = time.clock()                   # Create a clock object to track the FPS.

# Take from the main frame buffer's RAM to allocate a second frame buffer.
# There's a lot more RAM in the frame buffer than in the MicroPython heap.
# However, after doing this you have a lot less RAM for some algorithms...
# So, be aware that it's a lot easier to get out of RAM issues now.
extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.GRAYSCALE)
extra_fb.replace(sensor.snapshot())

while(True):
    clock.tick() # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot() # Take a picture and return the image.

    for y in range(0, sensor.height(), BLOCK_H):
        for x in range(0, sensor.width(), BLOCK_W):
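            # The inner loop body was truncated here. A hedged sketch from the
            # stock image-patch displacement example: phase-correlate each block
            # against the stored frame and draw confident motion vectors.
            displacement = extra_fb.find_displacement(img,
                roi=(x, y, BLOCK_W, BLOCK_H), template_roi=(x, y, BLOCK_W, BLOCK_H))
            if displacement.response() > 0.1:
                pixel_x = x + (BLOCK_W // 2) + int(displacement.x_translation())
                pixel_y = y + (BLOCK_H // 2) + int(displacement.y_translation())
                img.draw_line((x + BLOCK_W // 2, y + BLOCK_H // 2, pixel_x, pixel_y), color=255)
    extra_fb.replace(img)  # store the current frame for the next comparison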
# LeNet Search Just Center Example
#
# LeNet is a convolutional neural network designed to classify its field of view into digits 0-9.
#
# In this example we slide the LeNet detector window over the image and get a list of activations
# where there might be an object. Note that using a CNN with a sliding window is extremely
# computationally expensive, so for an exhaustive search do not expect the CNN to be real-time.

import sensor, image, time, os, nn

sensor.reset()  # Reset and initialize the sensor.
sensor.set_pixformat(
    sensor.GRAYSCALE)  # Set pixel format to GRAYSCALE (or RGB565)
sensor.set_framesize(sensor.QVGA)  # Set frame size to QVGA (320x240)
sensor.set_windowing((128, 128))  # Set 128x128 window.
sensor.skip_frames(time=500)  # Don't let autogain run very long.
sensor.set_auto_gain(False)  # Turn off autogain.
sensor.set_auto_exposure(False)  # Turn off auto exposure.

# Load lenet network (You can get the network from OpenMV IDE).
net = nn.load('/lenet.network')
labels = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']

clock = time.clock()
while (True):
    clock.tick()

    img = sensor.snapshot()
    tmp_img = img.copy().binary([(150, 255)], invert=True)

    # net.search() will search an roi in the image for the network (or the
    # whole image if the roi is not specified).
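    # A hedged sketch of the missing search call, modeled on the stock OpenMV
    # nn search examples (the parameter values are illustrative):
    for obj in net.search(tmp_img, threshold=0.8, min_scale=1.0, scale_mul=0.8,
                          x_overlap=-1, y_overlap=-1, contrast_threshold=0.5):
        print("Detected %s - Confidence %f%%" % (labels[obj.index()], obj.value() * 100))
    print(clock.fps())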
#
# A color code is a blob composed of two or more colors. The example below will
# only track colored objects which have both the colors below in them.

import sensor, image, time, math

# Color Tracking Thresholds (L Min, L Max, A Min, A Max, B Min, B Max)
# The below thresholds track in general red/green things. You may wish to tune them...
thresholds = [(30, 100, 15, 127, 15, 127), # generic_red_thresholds -> index is 0 so code == (1 << 0)
              (30, 100, -64, -8, -32, 32)] # generic_green_thresholds -> index is 1 so code == (1 << 1)
# Codes are or'ed together when "merge=True" for "find_blobs".

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.skip_frames(time = 2000)
sensor.set_auto_gain(False) # must be turned off for color tracking
sensor.set_auto_whitebal(False) # must be turned off for color tracking
clock = time.clock()

# Only blobs that with more pixels than "pixel_threshold" and more area than "area_threshold" are
# returned by "find_blobs" below. Change "pixels_threshold" and "area_threshold" if you change the
# camera resolution. "merge=True" must be set to merge overlapping color blobs for color codes.

while(True):
    clock.tick()
    img = sensor.snapshot()
    for blob in img.find_blobs(thresholds, pixels_threshold=100, area_threshold=100, merge=True):
        if blob.code() == 3: # r/g code == (1 << 1) | (1 << 0)
            # These values depend on the blob not being circular - otherwise they will be shaky.
            if blob.elongation() > 0.5:
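                # The example was truncated here. A hedged completion from the
                # stock color-code example: draw rotation-aware markers on
                # elongated blobs, then mark the bounding box and centroid.
                img.draw_edges(blob.min_corners(), color=(255, 0, 0))
                img.draw_line(blob.major_axis_line(), color=(0, 255, 0))
                img.draw_line(blob.minor_axis_line(), color=(0, 0, 255))
            img.draw_rectangle(blob.rect())
            img.draw_cross(blob.cx(), blob.cy())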
"""
DCT_reconstruct.py - By: Keiko Nagami - Friday Mar 13 2020
- Reads bytearrays containing quantized DCT coefficients of five 8x8-pixel tiles from text files
- Reconstructs the quantized DCT coefficient matrices for each tile and computes the inverse DCT to get raw Bayer image data
- Saves .png image tiles
- Run this with camera board, WITH SD card plugged in, in OpenMV IDE
"""

# import relevant libraries
import sensor, image, math, umatrix, ulinalg, ulab

sensor.reset()  # initialize the camera sensor
sensor.set_pixformat(sensor.RGB565)  # sensor.RGB565 takes RGB image
sensor.set_framesize(sensor.VGA)  # sensor.VGA takes 640x480 image
sensor.skip_frames(time=2500)  # let new settings take effect

# The Bayer filter uses an RGGB pattern. Each set contains the row indices that correspond to the R, G, or B pixels.
set_R = [
    0,
    4,
    8,
    12,
]  # R
set_G = [
    1,
    2,
    5,
    6,
    9,
    10,
    13,