예제 #1
0
#
# NOTE: Please reset the camera after running this script to see the new file.
import sensor, time, image

# --- Camera setup ---
sensor.reset()

# Sensor settings
sensor.set_contrast(3)
sensor.set_gainceiling(16)
sensor.set_framesize(sensor.VGA)
sensor.set_windowing((320, 240))        # crop the VGA frame to a 320x240 window
sensor.set_pixformat(sensor.GRAYSCALE)

sensor.skip_frames(time = 2000)         # let the new settings settle
sensor.set_auto_gain(False, value=100)  # fixed gain so keypoints are repeatable

FILE_NAME = "desc"
img = sensor.snapshot()
# NOTE: See the docs for other arguments
# NOTE: By default find_keypoints returns multi-scale keypoints extracted from an image pyramid.
kpts = img.find_keypoints(max_keypoints=150, threshold=10, scale_factor=1.2)

# find_keypoints returns None when nothing was found; test identity with
# "is None" (PEP 8) rather than "== None", and raise without the redundant
# call-style parentheses around the raise statement.
if kpts is None:
    raise Exception("Couldn't find any keypoints!")

# Persist the ORB descriptor plus a matching snapshot for later matching runs.
image.save_descriptor(kpts, "/%s.orb"%(FILE_NAME))
img.save("/%s.pgm"%(FILE_NAME))

img.draw_keypoints(kpts)
sensor.snapshot()
e_lab_color_signatures = [] # original enabled threshold indexes
# Build the enabled-threshold tables from lab_color_thresholds, where each
# entry is (L_min, L_max, A_min, A_max, B_min, B_max, code, enabled).
# NOTE(review): lab_color_thresholds, e_lab_color_thresholds and
# e_lab_color_code are used here but not defined in this chunk -- presumably
# declared earlier in the full file; verify.
for i in range(len(lab_color_thresholds)):
    if lab_color_thresholds[i][7]:
        e_lab_color_thresholds.append(lab_color_thresholds[i][0:6])
        e_lab_color_code.append(lab_color_thresholds[i][6])
        e_lab_color_signatures.append(i + 1)

import image, math, pyb, sensor, struct, time

# Camera Setup

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.skip_frames(time = 2000)  # let the new settings settle
sensor.set_auto_gain(False)      # fixed gain keeps color thresholds stable
sensor.set_auto_whitebal(False)  # fixed white balance, same reason

# LED Setup

red_led = pyb.LED(1)
green_led = pyb.LED(2)
blue_led = pyb.LED(3)

red_led.off()
green_led.off()
blue_led.off()

# DAC Setup

# NOTE(review): analog_out_enable is not defined in this chunk -- presumably a
# configuration flag set earlier in the file; verify.
dac = pyb.DAC("P6") if analog_out_enable else None
예제 #3
0
# AprilTags Example
#
# This example shows the power of the OpenMV Cam to detect April Tags
# on the OpenMV Cam M7. The M4 versions cannot detect April Tags.


import machine, gc,utime
from pyb import LED
import sensor, image, time, math
import LPF2Class

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QQVGA) # we run out of memory if the resolution is much bigger...
sensor.skip_frames(time = 2000)
sensor.set_auto_gain(False)  # must turn this off to prevent image washout...
sensor.set_auto_whitebal(False)  # must turn this off to prevent image washout...

print("made it here")  # debug marker: camera configured OK

# Note! Unlike find_qrcodes the find_apriltags method does not need lens correction on the image to work.

# The apriltag code supports up to 6 tag families which can be processed at the same time.
# Returned tag objects will have their tag family and id within the tag family.

# Build a bitmask of tag families to search for (bitwise OR of image.TAG* flags).
tag_families = 0
tag_families |= image.TAG16H5 # comment out to disable this family
tag_families |= image.TAG25H7 # comment out to disable this family
tag_families |= image.TAG25H9 # comment out to disable this family
tag_families |= image.TAG36H10 # comment out to disable this family
tag_families |= image.TAG36H11 # comment out to disable this family (default family)
예제 #4
0
 def decrease_exposure(self):
     """Step the manual exposure time down by 1000 us and lock auto gain off."""
     sensor.set_auto_exposure(False,
                              exposure_us=sensor.get_exposure_us() - 1000)
     sensor.set_auto_gain(False)
예제 #5
0
import sensor
import machine
import image
import time

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.skip_frames(time=2000)
sensor.set_auto_gain(False)  # must be turned off to prevent image washout...
clock = time.clock()

# Scan every frame for QR codes and react to a known payload.
while (True):
    clock.tick()
    img = sensor.snapshot()
    #img.lens_corr(1.8) # a strength of 1.8 works well for a 2.8mm lens.
    for code in img.find_qrcodes():
        img.draw_rectangle(code.rect(), color=(160, 0, 0))
        #print(code)
        message = code.payload()
        #print(message)
        if (code.payload() == '12'):
            print('Extracted materials')
    #print(clock.fps())

# QR-code example routine
## traffic light: color recognition + shape recognition (circles)
예제 #6
0

#
# Camera Control Code
#

# NOTE(review): COLOR_LINE_FOLLOWING, FRAME_SIZE, FRAME_WIDE, FRAME_REGION,
# THROTTLE_OFFSET and the time/pyb imports are not visible in this chunk --
# presumably defined earlier in the full file; verify.
sensor.reset()
sensor.set_pixformat(
    sensor.RGB565 if COLOR_LINE_FOLLOWING else sensor.GRAYSCALE)
sensor.set_framesize(FRAME_SIZE)
sensor.set_vflip(True)
sensor.set_hmirror(True)
# Crop to the lower region of the frame used for line following.
sensor.set_windowing((int((sensor.width() / 2) - ((sensor.width() / 2) * FRAME_WIDE)), int(sensor.height() * (1.0 - FRAME_REGION)), \
                     int((sensor.width() / 2) + ((sensor.width() / 2) * FRAME_WIDE)), int(sensor.height() * FRAME_REGION)))
sensor.skip_frames(time=200)
# Fixed gain/white balance keep color thresholds stable while color tracking.
if COLOR_LINE_FOLLOWING: sensor.set_auto_gain(False)
if COLOR_LINE_FOLLOWING: sensor.set_auto_whitebal(False)
clock = time.clock()
#sensor.set_auto_exposure(False, \
#    exposure_us = 300)

###########
# Loop
###########

old_time = pyb.millis()

# Throttle controller state: previous result, integral term, current output.
throttle_old_result = None
throttle_i_output = 0
throttle_output = THROTTLE_OFFSET
예제 #7
0
sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QQVGA)  #Resolution, QVGA = 42FPS,QQVGA = 85FPS

sensor.skip_frames(time=500)

sensor.set_auto_exposure(False)
sensor.set_auto_whitebal(False)
# Need to let the above settings get in...
sensor.skip_frames(time=500)
#sensor.set_windowing((35, 8, 112, 112)) # Robot 0
sensor.set_windowing((37, 0, 112, 112))  # Robot 1

# === GAIN ===
# Freeze the gain at whatever the auto algorithm converged to.
curr_gain = sensor.get_gain_db()
sensor.set_auto_gain(False, gain_db=curr_gain)

# === EXPOSURE ===
# Freeze the exposure time the same way.
curr_exposure = sensor.get_exposure_us()
sensor.set_auto_exposure(False, exposure_us=int(curr_exposure))

# === WHITE BAL ===
# Fixed RGB channel gains (dB) tuned for this robot's lighting.
sensor.set_auto_whitebal(False, rgb_gain_db=((-6.02073, -2.144467, 5.12728)))

#sensor.set_auto_whitebal(False,
#rgb_gain_db=((-6.02073, -6.02073, 1.376936)))

sensor.set_brightness(2)
sensor.set_contrast(3)
sensor.set_saturation(3)
예제 #8
0
파일: main.py 프로젝트: califafa/ETAfly
threshold_index = 0  # 0 for red, 1 for green, 2 for blue

# Color Tracking Thresholds (L Min, L Max, A Min, A Max, B Min, B Max)
thresholds = [
    (21, 84, 22, 90, -18, 58),  # orange-red blob
    (30, 100, -64, -8, -32, 32),  # generic_green_thresholds
    (0, 30, 0, 64, -128, 0)
]  # generic_blue_thresholds

sensor.reset()  # reset the sensor
sensor.set_pixformat(
    sensor.RGB565
)  # RGB565: each color pixel occupies 2 bytes -- high 5 bits R, middle 6 bits G, low 5 bits B
sensor.set_framesize(sensor.QQVGA)  # QQVGA is 160x120 (the original "320*240" comment described QVGA)
sensor.skip_frames(time=500)  # skip frames while the camera stabilises
sensor.set_auto_gain(False)  # auto gain is normally disabled for color recognition -- it shifts the thresholds
sensor.set_auto_whitebal(False)  # auto white balance likewise disturbs the LAB thresholds
clock = time.clock()  # clock object for FPS tracking

uart = UART(3, 115200)
uart.init(115200, bits=8, parity=None, stop=1,
          timeout_char=1000)  # timeout_char is the inter-character timeout in milliseconds


class ctrl_info(object):
    # Detection mode: 0x01 fixed single-color, 0x02 self-learned color, 0x03 line following
    WorkMode = 0x03
    # Index of the active threshold
    Threshold_index = 0x00


ctrl = ctrl_info()  # control-state instance
single_blob.InitSuccess_LED()  # init done: flash green LED twice (single_blob defined elsewhere in the file)
예제 #9
0
    except:
        return 0


def prntime(ms):
    """Convert a duration in milliseconds to (days, hours, minutes, seconds).

    ms -- duration in milliseconds (non-negative number).
    Returns a 4-tuple (d, h, m, s) of whole units.

    Fix: the original used true division (ms / 1000), which propagated floats
    through every divmod and returned fractional values such as (0.0, 0.0,
    0.0, 59.999). Floor division keeps everything integral.
    """
    s = ms // 1000          # whole seconds; sub-second remainder is dropped
    m, s = divmod(s, 60)
    h, m = divmod(m, 60)
    d, h = divmod(h, 24)
    return d, h, m, s


sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.set_auto_gain(1)
sensor.set_auto_exposure(1)
sensor.set_brightness(0)
sensor.set_saturation(0)
sensor.set_contrast(0)
#sensor.skip_frames(5)
sensor.set_hmirror(1)  # mirror the image horizontally
sensor.set_vflip(1)  # flip the image vertically
sensor.run(1)
#task = kpu.load(0x300000) # you need put model(face.kfpkg) in flash at address 0x300000
# Load the face-detection model from the SD card into the K210 KPU.
# NOTE(review): kpu and os are not imported in this chunk -- presumably
# imported earlier in the full file; verify.
task = kpu.load("/sd/facedetect.kmodel")
# YOLO2 anchor boxes (width/height pairs) for the face model.
anchor = (1.889, 2.5245, 2.9465, 3.94056, 3.99987, 5.3658, 5.155437, 6.92275,
          6.718375, 9.01025)
a = kpu.init_yolo2(task, 0.5, 0.3, 5, anchor)
if not "temp" in os.listdir("/sd/"):
    os.mkdir("/sd/temp/")  # Make a temp directory
예제 #10
0
파일: detektor_cepa.py 프로젝트: opinau/cv
#Use built-in LED, useful for debugging purposes
# please note that additional light source reflected from polished bottle surface can confuse camera
red_led = LED(
    1
)  # use only for debugging / camera status, see previous line / not really used in code

sensor.reset()  # Initialize the camera sensor
sensor.set_pixformat(
    sensor.GRAYSCALE
)  # set camera format to grayscale (color not important in this example)
sensor.set_framesize(sensor.QVGA)  # set camera resolution to QVGA 320 x 240
# NOTE(review): user_exposure_time is defined earlier in the full file.
sensor.set_auto_exposure(
    False, exposure_us=user_exposure_time
)  # set exposure time, user changeable (user_exposure_time variable)
sensor.set_auto_gain(
    False, gain_db=10
)  #set camera gain; keep it, as this value is optimised for the given light source and exposure time

#these settings are set manually in order to prevent the camera from changing them during use (no automatic settings in machine vision!)
sensor.set_brightness(2)  #set camera brightness
sensor.set_contrast(3)  #set camera contrast
sensor.set_saturation(0)  #set camera saturation

###################################################
### input template images
# Please use small (eg 50 x 100 px or smaller) images in PGM format, grayscale
# place in camera root (stored microSD card)
# note that copy_to_fb = True is not used, since extra fb is required for morphological operations
# newer / more advanced cameras like M7plus may have sufficient memory to store multiple frame buffers and further accelerate code
# this template is compatible with most standard beer bottles available in stores
# if different beer bottle is used, please create your own template that should match provided template
예제 #11
0
                i = 0
                for b in bls:
                    if b.area() > 1000:
                        i += 1
                        img.draw_rectangle(b.x(), b.y(), b.w(), b.h(),
                                           (255, 100 * i, 50 * i), 2)

                todo = marks_recognition(img, bls)
    except:
        pass
    return todo, a, a1


if __name__ == "__main__":
    sensor.reset()
    sensor.set_pixformat(sensor.RGB565)
    sensor.set_framesize(sensor.QVGA)
    sensor.set_auto_gain(False, 15)  # fixed gain for stable detection
    sensor.skip_frames(time=2000)

    clock = time.clock()

    # Continuously grab frames, undistort them, and run mark recognition.
    # NOTE(review): marksss() is defined elsewhere in the full file.
    while (True):
        clock.tick()
        img = sensor.snapshot()
        img.lens_corr(1.7, 1.3)  # correct lens distortion (strength, zoom)
        todo, a, a1 = marksss(img)
        print(todo, a, a1)

    #img.binary([(19, 61, -50, -24, -26, 31)], False)
def findCubeCenter():
    """
    Find the cube ROI position.

    Repeatedly grabs frames, periodically binarises them with one of the
    LAB_THRESHOLD entries, and searches for a square (via find_rects) near the
    centre of the current global ``roi``.  When a square is found, ``roi`` is
    re-centred on it, the sensor is re-initialised with fixed gain/exposure,
    and the function returns 1.
    NOTE(review): relies on module-level names defined elsewhere in the file:
    roi, lcd, isSquare, findCenter.
    """
    global roi
    sensor.set_auto_whitebal(False)
    sensor.set_contrast(2)

    cnt = 1
    # Candidate LAB thresholds tried in turn while hunting for the cube face.
    LAB_THRESHOLD = (
        (0, 60, -40, 50, -40, 10),  # blue
        (0, 40, -50, 40, -60, 30),  # yellow orange red white
        (0, 50, -40, 15, -25, 70))
    #(0, 70, -25, 15, -60, 30)) # green

    CENTER_THRESHOLD = roi[2] / 3 / 2
    gain = 0
    while (True):

        # After 12 unsuccessful frames, start forcing the gain manually.
        if cnt > 12:
            sensor.set_auto_gain(gain)

        img = sensor.snapshot()

        # Every 60 frames restart the counter with a higher forced gain.
        if cnt % 60 == 0:
            cnt = 1
            gain += 10

        # On alternating 24-frame windows, binarise with one threshold and
        # clean up with dilate/erode before rectangle detection.
        if (int(cnt / 24)) % 2 == 1:
            lab_threshold = LAB_THRESHOLD[int(cnt / 12) - 2]
            img = img.binary([lab_threshold])
            img = img.dilate(2)
            img = img.erode(2)

        lcd.display(img)

        # Search window centred on the current roi.
        center_roi = list(
            map(int, [
                roi[0] + roi[2] / 2 - CENTER_THRESHOLD * 2,
                roi[1] + roi[3] / 2 - CENTER_THRESHOLD * 2,
                CENTER_THRESHOLD * 4, CENTER_THRESHOLD * 4
            ]))
        squares = []
        for r in img.find_rects(roi=center_roi, threshold=500):
            if (isSquare(r)):
                squares.append(r)
                img = img.draw_rectangle(r.rect())
                for p in r.corners():
                    img = img.draw_circle(p[0], p[1], 5, color=(0, 255, 0))
                lcd.display(img)
                #time.sleep_ms(5000)
        if not squares:
            cnt += 1
            print(cnt)
        else:
            # Re-centre the roi on the detected square(s) and show the result.
            roi = findCenter(squares, roi, CENTER_THRESHOLD * math.sqrt(2))
            center_roi = list(
                map(int, [
                    roi[0] + roi[2] / 2 - CENTER_THRESHOLD * 2,
                    roi[1] + roi[3] / 2 - CENTER_THRESHOLD * 2,
                    CENTER_THRESHOLD * 4, CENTER_THRESHOLD * 4
                ]))
            img = img.draw_rectangle(center_roi)
            img = img.draw_rectangle(roi)

            lcd.display(img)

            # Lock the sensor into fixed-gain, fixed-exposure color mode for
            # the subsequent reads.
            sensor.reset()
            sensor.set_pixformat(sensor.RGB565)
            sensor.set_framesize(sensor.QQVGA)

            sensor.set_auto_whitebal(False)
            sensor.skip_frames(time=60)
            gain = sensor.get_gain_db()
            sensor.set_auto_gain(0, gain)
            sensor.skip_frames(time=60)
            sensor.set_auto_exposure(0, 80000)

            sensor.skip_frames(time=60)
            return 1
예제 #13
0
# In Memory Shadow Removal w/ Frame Differencing Example
#
# This example demonstrates using frame differencing with your OpenMV Cam using
# shadow removal to help reduce the effects of cast shadows in your scene.

import sensor, image, pyb, os, time

TRIGGER_THRESHOLD = 5  # minimum difference level treated as "motion"

sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.GRAYSCALE
sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
if sensor.get_id() == sensor.OV7725: # Reduce sensor PLL from 6x to 4x.
    sensor.__write_reg(0x0D, (sensor.__read_reg(0x0D) & 0x3F) | 0x40)
sensor.skip_frames(time = 2000) # Let new settings take effect.
sensor.set_auto_gain(False) # Turn this off too.
clock = time.clock() # Tracks FPS.

# Take from the main frame buffer's RAM to allocate a second frame buffer.
# There's a lot more RAM in the frame buffer than in the MicroPython heap.
# However, after doing this you have a lot less RAM for some algorithms...
# So, be aware that it's a lot easier to get out of RAM issues now. However,
# frame differencing doesn't use a lot of the extra space in the frame buffer.
# But, things like AprilTags do and won't work if you do this...
extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.GRAYSCALE)

print("About to save background image...")
sensor.skip_frames(time = 2000) # Give the user time to get ready.
extra_fb.replace(sensor.snapshot())
print("Saved background image - Now frame differencing!")
예제 #14
0
#
# CIFAR is a convolutional neural network designed to classify its field of view into several
# different object types and works on RGB video data.
#
# In this example we slide the LeNet detector window over the image and get a list of activations
# where there might be an object. Note that using a CNN with a sliding window is extremely compute
# expensive so for an exhaustive search do not expect the CNN to be real-time.

import sensor, image, time, os, nn

sensor.reset()                         # Reset and initialize the sensor.
sensor.set_pixformat(sensor.RGB565)    # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.QVGA)      # Set frame size to QVGA (320x240)
sensor.set_windowing((128, 128))       # Set 128x128 window.
sensor.skip_frames(time=750)           # Don't let autogain run very long.
sensor.set_auto_gain(False)            # Turn off autogain.
sensor.set_auto_exposure(False)        # Turn off auto exposure.

# Load cifar10 network (You can get the network from OpenMV IDE).
net = nn.load('/cifar10.network')
# Faster, smaller and less accurate.
# net = nn.load('/cifar10_fast.network')
labels = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']

clock = time.clock()
while(True):
    clock.tick()

    img = sensor.snapshot()

    # net.search() will search an roi in the image for the network (or the whole image if the roi is not
예제 #15
0
# and then use gain control to make up any remaining ground.

# We can achieve the above by setting a gain ceiling on the automatic
# gain control algorithm. Once this is set the algorithm will have to
# increase the exposure time to meet any gain needs versus using gain
# to do so. However, this comes at the price of the exposure time varying
# more when the lighting changes versus the exposure being constant and
# the gain changing.

import sensor, image, time

sensor.reset()                      # Reset and initialize the sensor.
sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.QVGA)   # Set frame size to QVGA (320x240)

# The gain db ceiling maxes out at about 24 db for the OV7725 sensor.
sensor.set_auto_gain(True, gain_db_ceiling = 16.0) # Default gain.

# Note! If you set the gain ceiling too low without adjusting the exposure control
# target value then you'll just get a lot of oscillation from the exposure
# control if it's on.

sensor.skip_frames(time = 2000)     # Wait for settings to take effect.
clock = time.clock()                # Create a clock object to track the FPS.

# Report FPS plus whatever gain/exposure the auto algorithms settled on.
while(True):
    clock.tick()                    # Update the FPS clock.
    img = sensor.snapshot()         # Take a picture and return the image.
    print("FPS %f, Gain %f dB, Exposure %d us" % \
        (clock.fps(), sensor.get_gain_db(), sensor.get_exposure_us()))
예제 #16
0
# Untitled - By: oillight - Sat Apr 27 2019

import sensor, image, time, math
from pyb import UART, LED
sensor.reset()
sensor.set_auto_exposure(False, 1700)  # fixed exposure time (us)
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.QQQVGA)  # very small frame for speed
sensor.set_auto_gain(False, 27)  # fixed gain
sensor.skip_frames(time=2000)  # let the settings settle
led_B = LED(3)
led_G = LED(2)
led_R = LED(1)
clock = time.clock()


def get_average(data):
    """Return the arithmetic mean of *data*, rounded to the nearest integer."""
    return round(sum(data) / len(data))


def filter(data):
    """Drop the first sample (when there are more than two) and return the
    rounded mean of the rest.

    NOTE(review): this function shadows the builtin ``filter``.  Also
    ``sorted(data)`` discards its result, so ``data[1:length]`` removes the
    first element of the ORIGINAL order, not the minimum -- likely a bug.
    The function also falls off the end without returning ``average``; it may
    be truncated in this chunk -- verify against the full file.
    """
    length = len(data)
    if length > 2:
        sorted(data)
        tmps = data[1:length]
    else:
        tmps = data
    average = get_average(tmps)
예제 #17
0
def cal():
    """Shape-then-color classification loop.

    Waits for frames, matches square (zfx) and triangle (sjx) templates and
    detects circles (yx), votes on the most frequent shape, then re-reads the
    winning region three times to vote on its dominant color (red/green/blue).
    The result is streamed over ``uart`` as a small binary packet.

    NOTE(review): relies on module-level names defined elsewhere in the file:
    uart, clock, templates, zfx_tempaltes, yx_tempaltes, sjx_tempaltes,
    blue/red/green threshold lists, find_max, SEARCH_EX.
    """
    flag=0          # winning color code: 5=red, 6=green, 7=blue, 0=none
    zfx=0           # last square match rect
    yx=0            # last circle bounding rect
    sjx=0           # last triangle match rect
    r=[0,0,0,0]
    key = 0
    G=0
    while(True):
        # A byte value 1 on the UART aborts the loop.
        key=uart.readchar()
        if key==1:
            break
        sum_zfx=0
        sum_yx=0
        sum_sjx=0
        dis=0
        clock.tick()
        img = sensor.snapshot(1.8)
        #img1 = img.binary(blue)

        # Vote over every template group (square / circle / triangle).
        for x in templates :
            img = sensor.snapshot(1.8)
            img = img.to_grayscale()
            flag = 0
            for t in x:
                clock.tick()
                img = sensor.snapshot(1.8)
                img = img.to_grayscale()

                template = image.Image(t)
                #ball = image.Image(t)
                if x == zfx_tempaltes:
                    r = img.find_template(template, 0.80, step=4, search=SEARCH_EX) #, roi=(10, 0, 60, 60))
                    if r:
                        print(t)
                        zfx = r
                        sum_zfx=sum_zfx+1
                elif x == yx_tempaltes:
                    for c in img.find_circles(threshold = 3500, x_margin = 10, y_margin = 10, r_margin = 10,r_min = 2, r_max = 100, r_step = 2):
                        img.draw_circle(c.x(), c.y(), c.r(), color = (255, 0, 0))
                        if c.r()>1:
                            # Convert the circle into a bounding-box rect.
                            x=c.x()-c.r()
                            y=c.y()-c.r()
                            w=c.r()*2
                            h=c.r()*2
                            r=[x,y,w,h]
                            yx = r
                            sum_yx=20
                elif x == sjx_tempaltes:
                    r = img.find_template(template, 0.80, step=4, search=SEARCH_EX) #, roi=(10, 0, 60, 60))
                    if r:
                        print(t)
                        sjx = r
                        sum_sjx=sum_sjx+1
        # Pick the shape with the most votes: 8=square, 9=circle, 10=triangle.
        if (sum_zfx>sum_yx and sum_zfx>sum_sjx) :
            r=zfx
            t=8#"zfx"
        elif (sum_yx>sum_zfx and sum_yx>sum_sjx) :
            r=yx
            t=9#"yx"
        else:
            r=sjx
            t=10#"sjx"
        if (sum_zfx!=0 or sum_yx!=0 or sum_sjx!=0):

            #change[0]=r[0]+0
            #change[1]=r[1]+0
            #change[2]=r[2]-0
            #change[3]=r[3]-0
            sum_red=0
            sum_green=0
            sum_blue=0
            x=r[0]
            y=r[1]
            w=r[2]
            h=r[3]
            center_x=r[0]+int(r[2]/2)
            center_y=r[1]+int(r[3]/2)
            # Switch the sensor into color mode to classify the region.
            sensor.reset()
            sensor.set_pixformat(sensor.RGB565)
            sensor.set_framesize(sensor.QQVGA)
            sensor.skip_frames(time = 300)
            sensor.set_auto_gain(False) # must be turned off for color tracking
            sensor.set_auto_whitebal(False) # must be turned off for color tracking
            sensor.set_vflip(False)
            sensor.set_hmirror(False)
            img = sensor.snapshot(1.8)
            #r=list(r)

            # Sample the region three times and vote on its dominant color.
            i=3
            while(i>0):
                blobs = img.find_blobs(blue,roi=r,pixel_threshold=200,area_threshold=200)
                if blobs:

                    max_blob = find_max(blobs)
                    img.draw_rectangle(r) # rect
                    #img.draw_cross(center_x, center_y) # cx, cy
                    img.draw_cross(max_blob.cx(), max_blob.cy())
                    #img.draw_line(x+int(w/2),y,x,y+h)
                    #img.draw_line(x,y+h,x+w,y+h)
                    #img.draw_line(x+w,y+h,x+int(w/2),y)# triangle

                    img.draw_circle(x+int(w/2),y+int(h/2),int(w/2))
                    sum_blue=sum_blue+1

                blobs = img.find_blobs(red,roi=r,pixel_threshold=200,area_threshold=200)
                if blobs:

                    max_blob = find_max(blobs)
                    img.draw_rectangle(r) # rect
                    img.draw_cross(center_x, center_y) # cx, cy
                    img.draw_circle(x+int(w/2),y+int(h/2),int(h/2))
                    sum_red=sum_red+1



                blobs = img.find_blobs(green,roi=r,pixel_threshold=200,area_threshold=200)
                if blobs:

                    max_blob = find_max(blobs)
                    img.draw_rectangle(r) # rect
                    img.draw_cross(center_x, center_y) # cx, cy
                    sum_green=sum_green+1
                i=i-1

            if (sum_red>sum_green and sum_red>sum_blue) :
                flag=5#"red"
            elif (sum_green>sum_red and sum_green>sum_blue) :
                flag=6#"green"
            elif (sum_blue>sum_red and sum_blue>sum_green):
                flag=7#"blue"
            else :
                flag = 0

        if(r==0 or flag == 0):
            print("没找到")
        else:
            # Estimate range from the object's apparent half-width.
            Lm = int(r[2]/2)
            K = 25
            G=1
            length = K/Lm
            #edge =
            print("length:",length)
            print("color:",flag,"object:",t,"range:",r,"red:",sum_red,
                    "green:",sum_green,"blue:",sum_blue,"zfx_model:",sum_zfx,"yx_model:",
                    sum_yx,"sjx_model:",sum_sjx)
            # Packet layout: 0x55 0x53 <color> <shape> <half-width> <K> <G> 0x01
            uart.writechar(0x55)
            uart.writechar(0x53)
            uart.writechar(flag)
            uart.writechar(t)
            uart.writechar(Lm)
            uart.writechar(K)
            uart.writechar(G)
            uart.writechar(1)
            G=0
            break
예제 #18
0
# NOTE(review): sensor_format, img_width and img_height are defined earlier in
# the full file; not visible in this chunk.
sensor_size = sensor.QVGA
sensor.set_pixformat(sensor_format)
sensor.set_framesize(sensor_size)
# Note that there is a -2 compared to QVGA as there looks to be a little misalignment when moving from QVGA to QQQVGA
# As we use the same remap arrays for both definitions, it is important that both are aligned
sensor.set_windowing(
    (int((sensor.width() - img_width) / 2) - 2,
     int((sensor.height() - img_height) / 2), img_width, img_height))

#get the gains and exposure
gain_db = sensor.get_gain_db()
exposure_us = sensor.get_exposure_us()
rgb_gain_db = sensor.get_rgb_gain_db()

# Set the gain and exposure to fixed values (we dont care about the values)
sensor.set_auto_gain(False, gain_db)
sensor.set_auto_exposure(False, exposure_us)
sensor.set_auto_whitebal(False, rgb_gain_db)

# Setup contrast, brightness and saturation
sensor.set_contrast(0)  # range -3 to +3
sensor.set_brightness(0)  # range -3 to +3
sensor.set_saturation(0)  # range -3 to +3

# Disable night mode (auto frame rate) and black level calibration (BLC)
sensor.__write_reg(0x0E, 0b00000000)  # Disable night mode
sensor.__write_reg(0x3E, 0b00000000)  # Disable BLC

sensor.__write_reg(0x13, 0b00000000)  # disable automated gain
예제 #19
0
# CIFAR10 Example
import sensor, image, time, os, nn

sensor.reset()  # Reset and initialize the sensor.
sensor.set_contrast(3)
sensor.set_pixformat(sensor.RGB565)  # Set pixel format to RGB565
sensor.set_framesize(sensor.QVGA)  # Set frame size to QVGA (320x240)
sensor.set_windowing((128, 128))  # Set 128x128 window.
sensor.skip_frames(time=100)
sensor.set_auto_gain(True)
sensor.set_auto_exposure(True)

# Load cifar10 network
#cwd = os.getcwd()
#dirs = os.listdir(cwd)
#print(dirs)

#net = nn.load('cifar10.network')
# Faster, smaller and less accurate.
net = nn.load('/cifar10_fast.network')
labels = [
    'airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse',
    'ship', 'truck'
]

clock = time.clock()  # Create a clock object to track the FPS.
while (True):
    clock.tick()  # Update the FPS clock.
    img = sensor.snapshot()  # Take a picture and return the image.
    out = net.forward(img)  # run the CNN over the captured window
    max_idx = out.index(max(out))  # index of the strongest class activation
예제 #20
0
       ]


# Compute the weight divisor (we're computing this so you don't have to make weights add to 1).
# NOTE(review): ROIS is defined earlier in the full file; verify its layout --
# the r[4] access implies each entry carries a weight in slot 4.
weight_sum = 0
for r in ROIS: weight_sum += r[4] # r[4] is the roi weight.

# Camera setup...
clock = time.clock() # Tracks FPS.
sensor.reset() # Initialize the camera sensor.
sensor.__write_reg(0x6B, 0x22)  # switches camera into advanced calibration mode. See this for more: http://forums.openmv.io/viewtopic.php?p=1358#p1358
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QQVGA) # use QQVGA for speed.
sensor.set_vflip(True)
sensor.set_hmirror(True)
sensor.set_auto_gain(True)    # do some calibration at the start
sensor.set_auto_whitebal(True)
sensor.skip_frames(60) # Let new settings take effect.
sensor.set_auto_gain(False)   # now turn off autocalibration before we start color tracking
sensor.set_auto_whitebal(False)



while(True):
    clock.tick() # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot() # Take a picture and return the image.
    centroid_sum = 0
    for r in ROIS:
        blobs = img.find_blobs([thresholds[threshold_index]], roi=r[0:4], merge=True) # r[0:4] is roi tuple.
        if blobs:
            # Find the index of the blob with the most pixels.
예제 #21
0

sensor.set_pixformat(sensor.RGB565)

# Set the image format: RGB565 color or GRAYSCALE.



sensor.set_framesize(sensor.QVGA)

# Set the image resolution.


sensor.set_windowing((240, 240)) # centred 240x240 window of the frame
sensor.set_auto_exposure(False, exposure_us=2000)  # fixed 2 ms exposure
sensor.set_auto_gain(False) # auto gain must be off for color tracking
sensor.set_auto_whitebal(False, rgb_gain_db = (1,-1,1))  # fixed white-balance gains
sensor.skip_frames(time = 2000)
clock1 = time.clock()
clock2 = time.clock()
while(True)
    x=0
    y=0
    w=0
    h=0
    while(1):    #1循环——不断找色块的循环
        clock1.tick()
        img = sensor.snapshot()
        blobs = img.find_blobs([thresholds],False,x_stride=2,y_stride=1,area_threshold=10,merge=True)
        print(clock1.fps())
        if blobs:
예제 #22
0
# pass the thresholding test... otherwise, you don't get a distance
# benefit.

import sensor, image, time, math, omv

# Set the thresholds to find a white object (i.e. tag border)
thresholds = (150, 255)

sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
# Pick the largest frame size the board can handle for apriltag detection.
if omv.board_type() == "H7": sensor.set_framesize(sensor.VGA)
elif omv.board_type() == "M7": sensor.set_framesize(sensor.QVGA)
else: raise Exception("You need a more powerful OpenMV Cam to run this script")
sensor.skip_frames(
    time=200)  # increase this to let the auto methods run for longer
sensor.set_auto_gain(False)  # must be turned off for color tracking
sensor.set_auto_whitebal(False)  # must be turned off for color tracking
clock = time.clock()

# The apriltag code supports up to 6 tag families which can be processed at the same time.
# Returned tag objects will have their tag family and id within the tag family.
tag_families = 0
tag_families |= image.TAG16H5  # comment out to disable this family
tag_families |= image.TAG25H7  # comment out to disable this family
tag_families |= image.TAG25H9  # comment out to disable this family
tag_families |= image.TAG36H10  # comment out to disable this family
tag_families |= image.TAG36H11  # comment out to disable this family (default family)
tag_families |= image.ARTOOLKIT  # comment out to disable this family

while (True):
    clock.tick()
from pyb import UART
from pyb import Servo
RED_LED_PIN = 1
BLUE_LED_PIN = 3

TS = 3

lcd.init()  # NOTE(review): lcd and sensor are imported elsewhere in the full file

threshold_index = 0 # index into thresholds[] selecting the active LAB range

# LAB thresholds for the mask colors; multiple entries are supported.
thresholds = [(30, 66, -2, -32, -29, -11),  # mask 1
              (30, 66, -2, -32, -29, -11),  # pink
              (30, 66, -2, -32, -29, -11)]  # light yellow
# NOTE(review): the original comments on the next two lines were swapped --
# set_auto_gain controls GAIN and set_auto_whitebal controls WHITE BALANCE.
sensor.set_auto_gain(False)      # disable auto gain
sensor.set_auto_whitebal(False)  # disable auto white balance
clock = time.clock()


uart = UART(3, 115200)

class collect_face:
    def collect(self):
        sensor.reset() # Initialize the camera sensor.
        sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.GRAYSCALE
        sensor.set_framesize(sensor.B128X128) # or sensor.QQVGA (or others)
        sensor.set_windowing((92,112))
        sensor.skip_frames(10) # Let new settings take affect.
        sensor.skip_frames(time = 2000)
        num = 4 #设置被拍摄者序号,第一个人的图片保存到s1文件夹,第二个人的图片保存到s2文件夹,以此类推。每次更换拍摄者时,修改num值。
예제 #24
0
e_lab_color_signatures = [] # original enabled threshold indexes
# Build the enabled-threshold tables; each lab_color_thresholds entry is
# (L_min, L_max, A_min, A_max, B_min, B_max, code, enabled).
# NOTE(review): lab_color_thresholds, e_lab_color_thresholds and
# e_lab_color_code come from earlier in the full file; verify.
for i in range(len(lab_color_thresholds)):
    if lab_color_thresholds[i][7]:
        e_lab_color_thresholds.append(lab_color_thresholds[i][0:6])
        e_lab_color_code.append(lab_color_thresholds[i][6])
        e_lab_color_signatures.append(i + 1)

import image, math, pyb, sensor, struct, time

# Camera Setup

sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.QVGA)
sensor.skip_frames(time = 2000)  # let the new settings settle
sensor.set_auto_gain(False)      # fixed gain for repeatable thresholds

# LED Setup

red_led = pyb.LED(1)
green_led = pyb.LED(2)
blue_led = pyb.LED(3)

red_led.off()
green_led.off()
blue_led.off()

# DAC Setup

# NOTE(review): analog_out_enable is a configuration flag defined earlier in
# the full file; verify.
dac = pyb.DAC("P6") if analog_out_enable else None
예제 #25
0
# NOTE(review): PID, UART and sensor come from earlier in the full file.
# Pan (x) axis PID controller parameters.
pid_x = PID(100, 0, 0,
    0.7, 0.3, 0,
    0, 0, 0)

# Tilt (y) axis PID controller parameters (sign-inverted gains).
pid_y = PID(100, 0, 0,
    -0.7, -0.3, 0,
    0, 0, 0)


sensor.reset() # initialise the camera
sensor.set_pixformat(sensor.RGB565) # RGB565 format
sensor.set_framesize(sensor.QQVGA) # QQVGA is faster
sensor.set_auto_exposure(False,35000) # fixed exposure time
sensor.skip_frames(time = 2000) # skip 2000 ms so settings (incl. white balance) settle
sensor.set_auto_gain(False) # disable auto gain
sensor.set_auto_whitebal(False) # white balance is on by default; it must be off for color recognition
uart = UART(3, 115200)

control_val_x=1350 # initial gimbal x position
control_val_y=1100 # initial gimbal y position

MIN_LIMIT_x=1000 # gimbal x minimum
MAX_LIMIT_x=1700 # gimbal x maximum

MIN_LIMIT_y=950 # gimbal y minimum
MAX_LIMIT_y=1250 # gimbal y maximum

ans_x=70 # x coordinate of the ballistic impact point on the image
ans_y=54 # y coordinate of the ballistic impact point on the image
#
# This example demonstrates using frame differencing with your OpenMV Cam using
# shadow removal to help reduce the effects of cast shadows in your scene.

import sensor, image, pyb, os, time

TRIGGER_THRESHOLD = 5  # minimum difference level treated as "motion"

sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
if sensor.get_id() == sensor.OV7725: # Reduce sensor PLL from 6x to 4x.
    sensor.__write_reg(0x0D, (sensor.__read_reg(0x0D) & 0x3F) | 0x40)
sensor.skip_frames(time = 2000) # Let new settings take effect.
sensor.set_auto_whitebal(False) # Turn off white balance.
sensor.set_auto_gain(False) # Turn this off too.
clock = time.clock() # Tracks FPS.

# Take from the main frame buffer's RAM to allocate a second frame buffer.
# There's a lot more RAM in the frame buffer than in the MicroPython heap.
# However, after doing this you have a lot less RAM for some algorithms...
# So, be aware that it's a lot easier to get out of RAM issues now. However,
# frame differencing doesn't use a lot of the extra space in the frame buffer.
# But, things like AprilTags do and won't work if you do this...
extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.RGB565)

print("About to save background image...")
sensor.skip_frames(time = 2000) # Give the user time to get ready.
extra_fb.replace(sensor.snapshot())
print("Saved background image - Now frame differencing!")
예제 #27
0
# NOTE(review): the gain-control setup below appears twice back-to-back (a
# paste duplicate); the first copy omits set_auto_whitebal(False). Both
# copies end in a dangling `while` header whose loop body is missing from
# this chunk.
clock = time.clock()  # Create a clock object to track the FPS.

# You have to turn automatic exposure control and automatic white balance off
# otherwise they will change the image exposure to undo any gain settings
# that you put in place...
sensor.set_auto_exposure(False)
# Need to let the above settings get in...
sensor.skip_frames(time=500)

current_gain_in_decibels = sensor.get_gain_db()
print("Current Gain == %f db" % current_gain_in_decibels)

# Auto gain control (AGC) is enabled by default. Calling the below function
# disables sensor auto gain control. The additional "gain_db"
# argument then overrides the auto gain value after AGC is disabled.
sensor.set_auto_gain(False, \
    gain_db = current_gain_in_decibels * GAIN_SCALE)

print("New gain == %f db" % sensor.get_gain_db())
# sensor.get_gain_db() returns the exact camera sensor gain decibels.
# However, this may be a different number than what was commanded because
# the sensor code converts the gain to a small and large gain value which
# aren't able to accept all possible values...

# If you want to turn auto gain back on do: sensor.set_auto_gain(True)
# Note that the camera sensor will then change the gain as it likes.

# Doing: sensor.set_auto_gain(False)
# Just disables the gain value update but does not change the gain
# value the camera sensor determined was good.

while (True):
# --- second (duplicated) copy of the same setup follows; this one also
# disables auto white balance. ---
# You have to turn automatic exposure control and automatic white balance off
# otherwise they will change the image exposure to undo any gain settings
# that you put in place...
sensor.set_auto_exposure(False)
sensor.set_auto_whitebal(False)
# Need to let the above settings get in...
sensor.skip_frames(time = 500)

current_gain_in_decibels = sensor.get_gain_db()
print("Current Gain == %f db" % current_gain_in_decibels)

# Auto gain control (AGC) is enabled by default. Calling the below function
# disables sensor auto gain control. The additional "gain_db"
# argument then overrides the auto gain value after AGC is disabled.
sensor.set_auto_gain(False, \
    gain_db = current_gain_in_decibels * GAIN_SCALE)

print("New gain == %f db" % sensor.get_gain_db())
# sensor.get_gain_db() returns the exact camera sensor gain decibels.
# However, this may be a different number than what was commanded because
# the sensor code converts the gain to a small and large gain value which
# aren't able to accept all possible values...

# If you want to turn auto gain back on do: sensor.set_auto_gain(True)
# Note that the camera sensor will then change the gain as it likes.

# Doing: sensor.set_auto_gain(False)
# Just disables the gain value update but does not change the gain
# value the camera sensor determined was good.

while(True):
예제 #29
0
파일: test.py 프로젝트: yuyuyu00/Car
# NOTE(review): flagB and the imports this example needs (sensor, time) are
# defined elsewhere in this file.
flagB = 1

# Red / green / blue LAB color thresholds (L Min, L Max, A Min, A Max,
# B Min, B Max). Tune these against the actual colors in your scene.
red_threshold = (93, 24, 98, 29, -54, 90)
green_threshold = (34, 13, -14, -124, -106, 34)
blue_threshold = (48, 0, -68, 87, -128, -18)

# Combined list for find_blobs(); blob.code() reports 1, 2 or 4 depending on
# which threshold matched. Build it from the named tuples instead of
# repeating the literals so the two definitions cannot drift apart.
thresholds = [red_threshold, green_threshold, blue_threshold]

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.skip_frames(time=2000)
sensor.set_auto_gain(True)  # auto gain on
sensor.set_auto_whitebal(False)  # white balance off

clock = time.clock()

while True:
    clock.tick()
    img = sensor.snapshot()
    # Report every sufficiently large blob matching any of the thresholds.
    for blob in img.find_blobs(thresholds, pixels_threshold=2000, merge=False):
        img.draw_rectangle(blob.rect())
        img.draw_cross(blob.cx(), blob.cy())
        print("Pixel = ")
        print(blob.pixels())
        print("\r\n")
        print("Area = ")
        print(blob.area())
# Find Data Matrices Example
#
# This example shows off how easy it is to detect data matrices using the
# OpenMV Cam M7. Data matrices detection does not work on the M4 Camera.

import sensor, image, time, math

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.skip_frames(time = 2000)
sensor.set_auto_gain(False)  # must turn this off to prevent image washout...
sensor.set_auto_whitebal(False)  # must turn this off to prevent image washout...
clock = time.clock()

while(True):
    clock.tick()
    img = sensor.snapshot()
    img.lens_corr(1.8) # strength of 1.8 is good for the 2.8mm lens.

    matrices = img.find_datamatrices()
    for matrix in matrices:
        img.draw_rectangle(matrix.rect(), color = (255, 0, 0)) # outline each match in red
        # rotation is in radians; convert to degrees for the printout
        print_args = (matrix.rows(), matrix.columns(), matrix.payload(), (180 * matrix.rotation()) / math.pi, clock.fps())
        print("Matrix [%d:%d], Payload \"%s\", rotation %f (degrees), FPS %f" % print_args)
    if not matrices:
        print("FPS %f" % clock.fps()) # still report FPS when nothing was found
예제 #31
0
#
# NOTE: Please reset the camera after running this script to see the new file.
import sensor, time, image

# Reset sensor
sensor.reset()

# Sensor settings
sensor.set_contrast(3)
sensor.set_gainceiling(16)
sensor.set_framesize(sensor.VGA)
sensor.set_windowing((320, 240)) # use a 320x240 window of the VGA frame
sensor.set_pixformat(sensor.GRAYSCALE)

sensor.skip_frames(time = 2000)
sensor.set_auto_gain(False, value=100)

FILE_NAME = "desc"
img = sensor.snapshot()
# NOTE: See the docs for other arguments
# NOTE: By default find_keypoints returns multi-scale keypoints extracted from an image pyramid.
kpts = img.find_keypoints(max_keypoints=150, threshold=10, scale_factor=1.2)

# find_keypoints() returns None when nothing is found; test identity with
# `is None` rather than `== None` (PEP 8), and raise without the redundant
# call-style parentheses around the exception.
if kpts is None:
    raise Exception("Couldn't find any keypoints!")

# Save the ORB descriptor and the image it came from under the same name.
image.save_descriptor(kpts, "/%s.orb"%(FILE_NAME))
img.save("/%s.pgm"%(FILE_NAME))

img.draw_keypoints(kpts)
sensor.snapshot()
예제 #32
0
# A color code is a blob composed of two or more colors. The example below will
# only track colored objects which have both the colors below in them.

import sensor, image, time, math

# Color Tracking Thresholds (L Min, L Max, A Min, A Max, B Min, B Max)
# The below thresholds track in general red/green things. You may wish to tune them...
thresholds = [(30, 100, 15, 127, 15, 127), # generic_red_thresholds -> index is 0 so code == (1 << 0)
              (30, 100, -64, -8, -32, 32)] # generic_green_thresholds -> index is 1 so code == (1 << 1)
# Codes are or'ed together when "merge=True" for "find_blobs".

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.skip_frames(time = 2000)
sensor.set_auto_gain(False) # must be turned off for color tracking
sensor.set_auto_whitebal(False) # must be turned off for color tracking
clock = time.clock()

# Only blobs with more pixels than "pixels_threshold" and more area than "area_threshold" are
# returned by "find_blobs" below. Change "pixels_threshold" and "area_threshold" if you change the
# camera resolution. "merge=True" must be set to merge overlapping color blobs for color codes.

while(True):
    clock.tick()
    img = sensor.snapshot()
    for blob in img.find_blobs(thresholds, pixels_threshold=100, area_threshold=100, merge=True):
        if blob.code() == 3: # r/g code == (1 << 1) | (1 << 0)
            # These values depend on the blob not being circular - otherwise they will be shaky.
            if blob.elongation() > 0.5:
                img.draw_edges(blob.min_corners(), color=(255,0,0))
                # NOTE(review): the rest of this loop body is missing from
                # this chunk -- the next example begins at column 0 below.
#
# CIFAR is a convolutional neural network designed to classify its field of view into several
# different object types and works on RGB video data.
#
# In this example we slide the LeNet detector window over the image and get a list of activations
# where there might be an object. Note that using a CNN with a sliding window is extremely compute
# expensive so for an exhaustive search do not expect the CNN to be real-time.

import sensor, image, time, os, nn

sensor.reset()                         # Reset and initialize the sensor.
sensor.set_pixformat(sensor.RGB565)    # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.QVGA)      # Set frame size to QVGA (320x240)
sensor.set_windowing((128, 128))       # Set 128x128 window.
sensor.skip_frames(time=750)           # Don't let autogain run very long.
sensor.set_auto_gain(False)            # Turn off autogain.
sensor.set_auto_exposure(False)        # Turn off auto exposure.

# Load cifar10 network (You can get the network from OpenMV IDE).
net = nn.load('/cifar10.network')
# Faster, smaller and less accurate.
# net = nn.load('/cifar10_fast.network')
labels = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']

clock = time.clock()
while(True):
    clock.tick()

    img = sensor.snapshot()

    # net.search() will search an roi in the image for the network (or the whole image if the roi is not