Example #1
def unittest(data_path, temp_path):
    import sensor
    sensor.reset()
    sensor.set_framesize(sensor.QVGA)
    sensor.set_pixformat(sensor.GRAYSCALE)
    img = sensor.snapshot().clear()
    img.set_pixel(img.width()//2+50, 120, 255)
    img.set_pixel(img.width()//2-50, 120, 255)
    img.draw_line([img.width()//2-50, 50, img.width()//2+50, 50])
    img.draw_rectangle([img.width()//2-25, img.height()//2-25, 50, 50])
    img.draw_circle(img.width()//2, img.height()//2, 40)
    img.draw_string(11, 10, "HelloWorld!")
    img.draw_cross(img.width()//2, img.height()//2)
    sensor.flush()
    img.difference(data_path+"/drawing.pgm")
    stats = img.get_statistics()
    return (stats.max() == 0) and (stats.min() == 0)
Example #2
def test_color_bars():

    sensor.reset()
    # Set sensor settings
    sensor.set_brightness(0)
    sensor.set_saturation(0)
    sensor.set_gainceiling(8)
    sensor.set_contrast(2)

    # Set sensor pixel format
    sensor.set_framesize(sensor.QVGA)
    sensor.set_pixformat(sensor.RGB565)

    # Enable colorbar test mode
    sensor.set_colorbar(True)

    # Skip a few frames to let the sensor settle down.
    # Note: This takes more time when executed from the IDE.
    for i in range(0, 100):
        image = sensor.snapshot()

    # Color bars thresholds
    t = [lambda r, g, b: r < 50  and g < 50  and b < 50,   # Black
         lambda r, g, b: r < 50  and g < 50  and b > 200,  # Blue
         lambda r, g, b: r > 200 and g < 50  and b < 50,   # Red
         lambda r, g, b: r > 200 and g < 50  and b > 200,  # Purple
         lambda r, g, b: r < 50  and g > 200 and b < 50,   # Green
         lambda r, g, b: r < 50  and g > 200 and b > 200,  # Aqua
         lambda r, g, b: r > 200 and g > 200 and b < 50,   # Yellow
         lambda r, g, b: r > 200 and g > 200 and b > 200]  # White

    # 320x240 image with 8 color bars, each approximately 40 pixels wide.
    # We start from the center of the frame buffer and average the
    # values of 10 sample pixels from the center of each color bar.
    for i in range(0, 8):
        avg = (0, 0, 0)
        idx = 40*i+20 # center of colorbars
        for off in range(0, 10): # avg 10 pixels
            rgb = image.get_pixel(idx+off, 120)
            avg = tuple(map(sum, zip(avg, rgb)))

        if not t[i](avg[0]/10, avg[1]/10, avg[2]/10):
            raise Exception("COLOR BARS TEST FAILED. "
            "BAR#(%d): RGB(%d,%d,%d)"%(i+1, avg[0]/10, avg[1]/10, avg[2]/10))

    print("COLOR BARS TEST PASSED...")
Example #3
import sensor
import lcd
import image

print("init")
lcd.init(freq=15000000)
sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.run(1)
sensor.skip_frames(40)
print("init ok")

path = "/sd/image.jpg"
img = sensor.snapshot()
print("save image")
img.save(path)

print("read image")
img_read = image.Image(path)
lcd.display(img_read)
print("ok")

Example #4
led_b = GPIO(GPIO.GPIO2, GPIO.OUT)
led_b.value(1)  # 0 : ON, 1: OFF

fm.register(board_info.LED_W, fm.fpioa.GPIO3)
led_w = GPIO(GPIO.GPIO3, GPIO.OUT)
led_w.value(1)  # 0 : ON, 1: OFF

# Init Button
fm.register(board_info.BUTTON_A, fm.fpioa.GPIO4)
button_a = GPIO(GPIO.GPIO4, GPIO.PULL_UP)

# Init LCD
print("init LCD")
lcd.init(type=3, freq=40000000)
lcd.rotation(2)
sensor.reset(dual_buff=True)

# Init camera
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.skip_frames(100)
sensor.run(1)

# Find /SD
if uos.getcwd() != "/sd":
    sd_flag = False
else:
    sd_flag = True

clock = utime.clock()
img_cnt = 0
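# Hedged sketch (added): the example is cut off here in the source. Given the
# button, LEDs, clock, and sd_flag prepared above, a plausible continuation
# saves a snapshot to the SD card whenever BUTTON_A is pressed. The file path,
# delay values, and the assumption that sensor/lcd/utime imports appear in the
# truncated header are not from the original.
while True:
    img = sensor.snapshot()
    lcd.display(img)
    if button_a.value() == 0 and sd_flag:  # button is active low (pull-up)
        img_cnt += 1
        img.save("/sd/capture_%03d.jpg" % img_cnt)  # hypothetical path
        led_w.value(0)          # white LED on as feedback
        utime.sleep_ms(200)
        led_w.value(1)          # white LED off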
"""
DCT.py - By: Keiko Nagami - Friday Mar 13 2020
- Takes a Bayer image of size 480x640
- Breaks image up into 8 x 8 pixel blocks
- Gets the quantized DCT coefficients after performing a discrete cosine transform on each block
- Packs 5 tiles in one byte array to send; saves these as text files
- Run this with camera board, WITH SD card plugged in, in OpenMV IDE
"""

# import relevant libraries
import sensor, image, math, umatrix, ulinalg, ulab, utime

sensor.reset()  # initialize the camera sensor
sensor.set_pixformat(sensor.RGB565)  # sensor.RGB565 takes RGB image
sensor.set_framesize(sensor.VGA)  # sensor.VGA takes 640x480 image
sensor.skip_frames(time=2500)  # let new settings take effect
"""
def jpeg_dct(bayer_tile):

    This function performs the discrete cosine transform (DCT) on raw bayer image data and returns
    the DCT coefficients after they are quantized. Quantization is done to be able to send the DCT
    coefficients as byte array information in range [0,255]. See https://en.wikipedia.org/wiki/JPEG for more.

    bayer_tile: input of one 8x8 pixel tile of bayer information with range [0, 255]
    g: bayer_tile information centered at 0 for range [-128,127]
    G: DCT coefficients as floats
    B: output of quantized DCT coefficients in byte range [0,255]
"""


def jpeg_dct(bayer_tile):
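    # Hedged sketch (added): the body of this function is truncated in the
    # source. The steps below follow the docstring above: center the 8x8 tile
    # at zero, take the 2-D DCT-II, then quantize into byte range [0,255].
    # Plain Python loops are used instead of the umatrix/ulinalg helpers
    # imported above, QUANT is an assumed uniform quantizer step rather than
    # the real JPEG quantization table, and bayer_tile is assumed to be an
    # 8x8 nested sequence of values in [0, 255].
    N = 8
    QUANT = 16  # assumed quantizer step size

    def a(k):  # orthonormal DCT scale factor
        return math.sqrt(1.0 / N) if k == 0 else math.sqrt(2.0 / N)

    # 1) Center the tile at zero: [0, 255] -> [-128, 127]
    g = [[bayer_tile[x][y] - 128 for y in range(N)] for x in range(N)]

    # 2) 2-D DCT-II: G[u][v] = a(u) a(v) * sum_x sum_y g[x][y] cos(.) cos(.)
    G = [[0.0] * N for _ in range(N)]
    for u in range(N):
        for v in range(N):
            s = 0.0
            for x in range(N):
                for y in range(N):
                    s += (g[x][y]
                          * math.cos((2 * x + 1) * u * math.pi / (2 * N))
                          * math.cos((2 * y + 1) * v * math.pi / (2 * N)))
            G[u][v] = a(u) * a(v) * s

    # 3) Quantize and shift into [0, 255] so the coefficients can be packed
    #    into a bytearray for transmission.
    B = bytearray(N * N)
    for u in range(N):
        for v in range(N):
            q = int(round(G[u][v] / QUANT)) + 128
            B[u * N + v] = min(255, max(0, q))
    return B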
Example #6
THRESHOLD = (30, 45, -10, 10, -5, 4)

# edge = (-1,-1,-1,-1,8,-1,-1,-1,-1)  # edge-detection kernel
# sharp = (-1,-1,-1,-1,9,-1,-1,-1,-1) # sharpen kernel
# relievo = (2,0,0,0,-1,0,0,0,-1)     # emboss kernel
import sensor, image, time, lcd
from fpioa_manager import fm
from machine import UART

lcd.init(freq=15000000)
sensor.reset(dual_buff=1)
sensor.set_pixformat(
    sensor.RGB565)  # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.QVGA)  # Set frame size to QVGA (320x240)
sensor.skip_frames(time=2000)  # Wait for settings to take effect.
sensor.set_auto_whitebal(False)  # disable auto white balance
sensor.set_hmirror(1)
sensor.set_vflip(1)
sensor.set_windowing((224, 224))
sensor.set_brightness(0)  # set brightness
sensor.set_auto_gain(0)
sensor.run(1)  # run automatically, call sensor.run(0) to stop

# UART init
fm.register(10, fm.fpioa.UART2_TX)
fm.register(11, fm.fpioa.UART2_RX)
uart_1 = UART(UART.UART2, 115200, 8, None, 1, timeout=1000, read_buf_len=4096)  # UART2 to match the pin mapping above

clock = time.clock()  # Create a clock object to track the FPS.

Example #7
# LCD Example
#
# Note: this example requires an external LCD module.
#
# It displays the camera frame buffer on the LCD in real time.
#
# Translation and comments: 01Studio

import sensor, image, lcd

# Camera init
sensor.reset()  # Initialize the camera.
sensor.set_pixformat(sensor.RGB565)  # or sensor.GRAYSCALE
sensor.set_framesize(sensor.QQVGA2)  # The LCD resolution is 128x160.

# LCD init
lcd.init()

while (True):
    lcd.display(sensor.snapshot())  # Take a snapshot and display it.
Example #8
# Single-color detection, RGB565 mode

import sensor, image, time, math
from pyb import UART
import pyb

red = (0, 19, 3, 127, -128, 127)  # red threshold
green = (0, 36, -128, -29, -128, 127)  # green threshold
blue = (15, 37, -15, 127, -128, -13)  # blue threshold

# Camera setup
sensor.reset()  # initialize the sensor
sensor.set_pixformat(sensor.RGB565)  # color mode
sensor.set_framesize(sensor.QVGA)  # set the image size
sensor.skip_frames(time=2000)
#sensor.set_auto_gain(False) # disable auto gain
#sensor.set_auto_whitebal(False) # disable auto white balance


def sekuai():
    rl = []
    gl = []
    bl = []
    while (True):
        number = 0
        rl.clear()
        gl.clear()
        bl.clear()
        img = sensor.snapshot()  # take a photo; img is an image object
        for blob in img.find_blobs([red, green, blue],
                                   merge=False,
import sensor,image,lcd,time
import KPU as kpu
import urequests

# modules for the connection
import usocket, network, time
import lcd, image
from Maix import GPIO
from machine import UART
from fpioa_manager import fm, board_info

lcd.init(freq=15000000)
#sensor.reset()
sensor.reset(dual_buff=True)  # double the FPS at the cost of increased RAM usage

sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.skip_frames(time = 2000)     # Wait for settings to take effect.
sensor.set_windowing((224, 224))
sensor.run(1)

classes = ["person"]

tinyYolo = "/sd/models/tinyYoloMerged.kmodel" #16 fps
mbnet75 = "/sd/models/mbnet75Merged.kmodel" #12 fps
mbnet50 = "/sd/models/mbnet50Merged.kmodel" #15fps
mbnet25 = "/sd/models/mbnet25Merged.kmodel" #16fps

task=kpu.load(mbnet50)

try:
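    # Hedged sketch (added): the source truncates this example at the try
    # block. A typical single-class (person) detection loop with these
    # kmodels looks like the following; the anchors are copied from the other
    # yolo2 examples in this collection and are an assumption here, as is the
    # KeyboardInterrupt handler.
    anchor = (1.889, 2.5245, 2.9465, 3.94056, 3.99987, 5.3658,
              5.155437, 6.92275, 6.718375, 9.01025)
    kpu.init_yolo2(task, 0.5, 0.3, 5, anchor)
    while True:
        img = sensor.snapshot()
        objects = kpu.run_yolo2(task, img)
        if objects:
            for obj in objects:
                img.draw_rectangle(obj.rect())
                img.draw_string(obj.x(), obj.y(), classes[obj.classid()])
        lcd.display(img)
except KeyboardInterrupt:
    kpu.deinit(task)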
import sensor, image, time

green_threshold = (93, 99, -36, -7, -11, 4)
red_threshold = (95, 98, -21, -8, 34, 61)
yellow_threshold = (99, 100, -10, 5, -7, 20)

# Change this value to adjust the exposure. Try 10.0 / 0.1 / etc.
EXPOSURE_TIME_SCALE = 0.43

sensor.reset()  # Reset and initialize the sensor.
sensor.set_pixformat(
    sensor.RGB565)  # Set pixel format to RGB565 (or GRAYSCALE)
# Set the image color format: RGB565 color or GRAYSCALE grayscale.

sensor.set_framesize(sensor.QQVGA)  # Set frame size to QQVGA (160x120)

# Print out the initial exposure time for comparison.
print("Initial exposure == %d" % sensor.get_exposure_us())

sensor.skip_frames(30)  # Wait for settings to take effect.
clock = time.clock()  # Create a clock object to track FPS.

# You must turn off auto gain and auto white balance, otherwise they will
# change the image gain and undo any exposure setting you apply...
sensor.set_auto_gain(False)
sensor.set_auto_whitebal(False)
# Let the settings above take effect.
sensor.skip_frames(time=500)

current_exposure_time_in_microseconds = sensor.get_exposure_us()
print("Current Exposure == %d" % current_exposure_time_in_microseconds)
Example #11
def cal():
    flag=0
    zfx=0
    yx=0
    sjx=0
    r=[0,0,0,0]
    key = 0
    G=0
    while(True):
        key=uart.readchar()
        if key==1:
            break
        sum_zfx=0
        sum_yx=0
        sum_sjx=0
        dis=0
        clock.tick()
        img = sensor.snapshot(1.8)
        #img1 = img.binary(blue)

        for x in templates :
            img = sensor.snapshot(1.8)
            img = img.to_grayscale()
            flag = 0
            for t in x:
                clock.tick()
                img = sensor.snapshot(1.8)
                img = img.to_grayscale()

                template = image.Image(t)
                #ball = image.Image(t)
                if x == zfx_tempaltes:
                    r = img.find_template(template, 0.80, step=4, search=SEARCH_EX) #, roi=(10, 0, 60, 60))
                    if r:
                        print(t)
                        zfx = r
                        sum_zfx=sum_zfx+1
                elif x == yx_tempaltes:
                    for c in img.find_circles(threshold = 3500, x_margin = 10, y_margin = 10, r_margin = 10,r_min = 2, r_max = 100, r_step = 2):
                        img.draw_circle(c.x(), c.y(), c.r(), color = (255, 0, 0))
                        if c.r()>1:
                            x=c.x()-c.r()
                            y=c.y()-c.r()
                            w=c.r()*2
                            h=c.r()*2
                            r=[x,y,w,h]
                            yx = r
                            sum_yx=20
                elif x == sjx_tempaltes:
                    r = img.find_template(template, 0.80, step=4, search=SEARCH_EX) #, roi=(10, 0, 60, 60))
                    if r:
                        print(t)
                        sjx = r
                        sum_sjx=sum_sjx+1
        if (sum_zfx>sum_yx and sum_zfx>sum_sjx) :
            r=zfx
            t=8#"zfx"
        elif (sum_yx>sum_zfx and sum_yx>sum_sjx) :
            r=yx
            t=9#"yx"
        else:
            r=sjx
            t=10#"sjx"
        if (sum_zfx!=0 or sum_yx!=0 or sum_sjx!=0):

            #change[0]=r[0]+0
            #change[1]=r[1]+0
            #change[2]=r[2]-0
            #change[3]=r[3]-0
            sum_red=0
            sum_green=0
            sum_blue=0
            x=r[0]
            y=r[1]
            w=r[2]
            h=r[3]
            center_x=r[0]+int(r[2]/2)
            center_y=r[1]+int(r[3]/2)
            sensor.reset()
            sensor.set_pixformat(sensor.RGB565)
            sensor.set_framesize(sensor.QQVGA)
            sensor.skip_frames(time = 300)
            sensor.set_auto_gain(False) # must be turned off for color tracking
            sensor.set_auto_whitebal(False) # must be turned off for color tracking
            sensor.set_vflip(False)
            sensor.set_hmirror(False)
            img = sensor.snapshot(1.8)
            #r=list(r)

            i=3
            while(i>0):
                blobs = img.find_blobs(blue,roi=r,pixel_threshold=200,area_threshold=200)
                if blobs:

                    max_blob = find_max(blobs)
                    img.draw_rectangle(r) # rect
                    #img.draw_cross(center_x, center_y) # cx, cy
                    img.draw_cross(max_blob.cx(), max_blob.cy())
                    #img.draw_line(x+int(w/2),y,x,y+h)
                    #img.draw_line(x,y+h,x+w,y+h)
                    #img.draw_line(x+w,y+h,x+int(w/2),y)  # triangle

                    img.draw_circle(x+int(w/2),y+int(h/2),int(w/2))
                    sum_blue=sum_blue+1

                blobs = img.find_blobs(red,roi=r,pixel_threshold=200,area_threshold=200)
                if blobs:

                    max_blob = find_max(blobs)
                    img.draw_rectangle(r) # rect
                    img.draw_cross(center_x, center_y) # cx, cy
                    img.draw_circle(x+int(w/2),y+int(h/2),int(h/2))
                    sum_red=sum_red+1



                blobs = img.find_blobs(green,roi=r,pixel_threshold=200,area_threshold=200)
                if blobs:

                    max_blob = find_max(blobs)
                    img.draw_rectangle(r) # rect
                    img.draw_cross(center_x, center_y) # cx, cy
                    sum_green=sum_green+1
                i=i-1

            if (sum_red>sum_green and sum_red>sum_blue) :
                flag=5#"red"
            elif (sum_green>sum_red and sum_green>sum_blue) :
                flag=6#"green"
            elif (sum_blue>sum_red and sum_blue>sum_green):
                flag=7#"blue"
            else :
                flag = 0

        if(r==0 or flag == 0):
            print("没找到")
        else:
            Lm = int(r[2]/2)
            K = 25
            G=1
            length = K/Lm
            #edge =
            print("length:",length)
            print("color:",flag,"object:",t,"range:",r,"red:",sum_red,
                    "green:",sum_green,"blue:",sum_blue,"zfx_model:",sum_zfx,"yx_model:",
                    sum_yx,"sjx_model:",sum_sjx)
            uart.writechar(0x55)
            uart.writechar(0x53)
            uart.writechar(flag)
            uart.writechar(t)
            uart.writechar(Lm)
            uart.writechar(K)
            uart.writechar(G)
            uart.writechar(1)
            G=0
            break
Example #12
def face_recog(calc_time):
    pin = pyb.millis()
    print(pin)
    cc = 0
    #pyb.elapsed_millis(start)
    while pyb.elapsed_millis(pin) < calc_time:
        print("top of face recog function")
        #snapshot on face detection
        RED_LED_PIN = 1
        BLUE_LED_PIN = 3
        sensor.reset() # Initialize the camera sensor.
        sensor.set_pixformat(sensor.GRAYSCALE)
        sensor.set_framesize(sensor.HQVGA) # or sensor.QQVGA (or others)
        sensor.skip_frames(time = 2000) # Let new settings take effect.
        face_cascade = image.HaarCascade("frontalface", stages=25)
        uos.chdir("/")
        pyb.LED(RED_LED_PIN).on()
        print("About to start detecting faces...")
        sensor.skip_frames(time = 2000) # Give the user time to get ready.
        pyb.LED(RED_LED_PIN).off()
        print("Now detecting faces!")
        pyb.LED(BLUE_LED_PIN).on()
        diff = 10 # We'll say we detected a face after 10 frames.
        while(diff):
                img = sensor.snapshot()
                faces = img.find_features(face_cascade, threshold=0.5, scale_factor=1.5)
                if faces:
                    diff -= 1
                    for r in faces:
                        img.draw_rectangle(r)
        pyb.LED(BLUE_LED_PIN).off()
        print("Face detected! Saving image...")
        pic_name = "snapshot-person.pgm"
        sensor.snapshot().save(pic_name) # Save Pic. to root of SD card -- uos.chdir("/")
        pyb.delay(100)
        snap_img = image.Image(pic_name).mask_ellipse()
        d0 = snap_img.find_lbp((0, 0, snap_img.width(), snap_img.height()))
        # face recognition
        pyb.LED(2).on()
        name_lbp_list = []
        uos.chdir("/Faces") # change directory to where all the webex photos from tcp are stored
        for filename in uos.listdir("/Faces"):
            if filename.endswith(".pgm") :
                try:
                    img = None
                    img = image.Image(filename).mask_ellipse()
                    d1 = img.find_lbp((0, 0, img.width(), img.height()))
                    dist = image.match_descriptor(d0, d1,50)
                    word = filename
                    #print(filename)
                    und_loc = word.index('_')
                    word = word[0:(und_loc)]
                    name_lbp_list.append(word)
                    name_lbp_list.append(dist)
                    continue
                except Exception as e:
                    print(e)
                    print("error reading file")
            else:
                print("ERROR")
        print(name_lbp_list)
        #print(len(name_lbp_list))
        end = 0
        name_avg = []
        i = 0
        start = 0
        while i < len(name_lbp_list):
            if ( (i+2) < len(name_lbp_list)) and (name_lbp_list[i] != name_lbp_list[i+2] ) :
                end = i+2
                #print(start)
                #print(end)
                face = []
                face = name_lbp_list[start:end]
                print(face)
                j = 1
                sum_lbp = 0
                while j < len(face):
                    sum_lbp += face[j]
                    j += 2
                name_avg.append(face[0])
                name_avg.append(sum_lbp/(len(face)/2))
                start = i+2
            i += 2
        face = []
        face = name_lbp_list[(end):(len(name_lbp_list))]
        print(face)
        j = 1
        sum_lbp = 0
        while j < len(face):
            sum_lbp += face[j]
            j += 2
        name_avg.append(face[0])
        name_avg.append(sum_lbp/(len(face)/2))
        print(name_avg)
        lbps = []
        k = 1
        while k < len(name_avg):
            lbps.append(name_avg[k])
            k +=2
        print(lbps)
        #print(len(lbps))
        min_lbp = min(lbps)
        print(min_lbp)
        ind = lbps.index(min(lbps))
        #print(ind)
        ind += 1
        found_person = name_avg[2*ind - 2]
        id_name = "The person you are looking at is: " + found_person
        print(id_name)
        #delete snapshot of person
        uos.remove("/snapshot-person.pgm")
        pyb.LED(2).off()
        cc += 1
        print(cc)
Example #13
'''
Experiment: drawing shapes and writing text
Version: v1.0
Date: 2019.12
Author: 01Studio
'''

import sensor, image, time, lcd

lcd.init(freq=15000000)
sensor.reset()  # Reset the camera
#sensor.set_vflip(1)                 # use the rear-facing orientation (what you see is what you get)

sensor.set_pixformat(sensor.RGB565)  # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.QVGA)  # Set frame size to QVGA (320x240)
sensor.skip_frames(time=2000)  # Wait for settings to take effect.
clock = time.clock()  # Create a clock object to track FPS.

while (True):
    clock.tick()
    img = sensor.snapshot()

    # Draw a line from (x0, y0) to (x1, y1): red, thickness 2.
    img.draw_line(20, 20, 100, 20, color=(255, 0, 0), thickness=2)

    # Draw a rectangle: green, not filled.
    img.draw_rectangle(150,
                       20,
                       100,
                       30,
                       color=(0, 255, 0),
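                       fill=False)  # hedged completion (added): the call is cut off in the
                                    # source; fill=False matches the "not filled" comment above
    lcd.display(img)  # show the frame on the LCD (assumed; lcd.init() was called above)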
Example #14
# Experiment: face detection
# Translation and comments: 01Studio
# Reference: http://blog.sipeed.com/p/675.html

import sensor, lcd, time
import KPU as kpu

# Camera setup
sensor.reset(freq=24000000, set_regs=True, dual_buff=True)
sensor.set_auto_gain(1)
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.set_vflip(1)  # use the rear-facing orientation

lcd.init()  # LCD init

clock = time.clock()

#task = kpu.load(0x300000)  # requires the model (face.kfpkg) flashed to address 0x300000
task = kpu.load("/sd/face.kmodel")  # model on the SD card

# Model anchor parameters
anchor = (1.889, 2.5245, 2.9465, 3.94056, 3.99987, 5.3658, 5.155437, 6.92275,
          6.718375, 9.01025)

# Initialize the yolo2 network
a = kpu.init_yolo2(task, 0.5, 0.3, 5, anchor)

while (True):
    clock.tick()
    img = sensor.snapshot()
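    # Hedged continuation (added): the example is cut off here. The usual
    # MaixPy face-detection loop runs the yolo2 model on the frame, draws the
    # detected face rectangles, and shows the result on the LCD:
    code = kpu.run_yolo2(task, img)
    if code:
        for i in code:
            img.draw_rectangle(i.rect())
    lcd.display(img)
    print(clock.fps())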
Example #15
    os.chdir("/sd")
    sys.path.append('/sd')
    print("------------------")
    print("Micro SD detected!")
    print("------------------")
    print(os.listdir())
    TF_Card_OK = True
else:
    os.chdir("/flash")
    print("chdir to /flash")
sys.path.append('/flash')

if OV77XX_EN:
    #sensor.reset(freq=20000000, set_regs=True, dual_buff=False) #OV7740  Loop Time :155ms, run fps:6.451613
    #sensor.reset(freq=20000000, set_regs=True, dual_buff=True) #OV7740  Loop Time :91ms, run fps:10.98901
    sensor.reset()
else:
    sensor.reset()  # OV2640 Reset and initialize the sensor. It will
    # run automatically, call sensor.run(0) to stop
sensor_ID = sensor.get_id()
if (sensor_ID == 30530):
    sensor_ID_str = 'OV7740'
#sensor.shutdown(enable)
sensor.set_pixformat(
    sensor.RGB565)  # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.VGA)  #strerror(ENOMEM) = "Alloc memory fail"
#sensor.set_hmirror(1)#for unit V  #bugs??
#sensor.set_vflip(1)#for unit V
img_w = sensor.width()
img_h = sensor.height()
# up +- half of the horizontal and vertical resolution.

import sensor, image, time

# NOTE!!! You have to use a small power of 2 resolution when using
# find_displacement(). This is because the algorithm is powered by
# something called phase correlation which does the image comparison
# using FFTs. A non-power of 2 resolution requires padding to a power
# of 2 which reduces the usefulness of the algorithm results. Please
# use a resolution like B128X128 or B128X64 (2x faster).

# Your OpenMV Cam supports power of 2 resolutions of 64x32, 64x64,
# 128x64, and 128x128. If you want a resolution of 32x32 you can create
# it by doing "img.pool(2, 2)" on a 64x64 image.

sensor.reset()                         # Reset and initialize the sensor.
sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE (or RGB565)
sensor.set_framesize(sensor.B128X128)  # Set frame size to 128x128... (or 128x64)...
sensor.skip_frames(time = 2000)        # Wait for settings to take effect.
clock = time.clock()                   # Create a clock object to track the FPS.

# Take from the main frame buffer's RAM to allocate a second frame buffer.
# There's a lot more RAM in the frame buffer than in the MicroPython heap.
# However, after doing this you have a lot less RAM for some algorithms...
# So, be aware that it's a lot easier to get out of RAM issues now.
extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.GRAYSCALE)
extra_fb.replace(sensor.snapshot())

while(True):
    clock.tick() # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot() # Take a picture and return the image.
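    # Hedged continuation (added): the source cuts the example off here. Per
    # the note above, the next step is to measure the shift of the new frame
    # against the stored frame with phase correlation:
    displacement = extra_fb.find_displacement(img)
    # x/y translation are in pixels; response() indicates match quality.
    print("dx: %0.2f, dy: %0.2f, response: %0.2f, fps: %0.2f" % (
        displacement.x_translation(),
        displacement.y_translation(),
        displacement.response(),
        clock.fps()))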
# Hello World example
# Welcome to the OpenMV IDE! Click the green run button at the bottom left to run the script.

import sensor, image, time

sensor.reset()  # Reset and initialize the camera
sensor.set_pixformat(sensor.RGB565)  # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.QVGA)  # Set frame size to QVGA (320x240)
sensor.skip_frames(time=2000)  # Wait for settings to take effect
clock = time.clock()  # Create a clock to track FPS (frames per second)

while (True):
    clock.tick()  # Update the FPS clock.
    img = sensor.snapshot()  # Take a picture and return the image
    print(clock.fps())  # Note: OpenMV runs about half as fast while connected to the IDE; FPS increases once disconnected.
Example #18
    try:
        #currentImage = max(findMaxIDinDir("/sd/train/" + str(currentDirectory)), findMaxIDinDir("/sd/vaild/" + str(currentDirectory))) + 1
        currentImage = findMaxIDinDir("/sd/train/") + 1
        print("------------------")
        print("##: Current image file index: " + str(currentImage))
        print("------------------")
        time.sleep(0.5)

    except:
        currentImage = 0
        print("Get current image file index failed")
        pass

if OV77XX_EN:
    #sensor.reset(freq=20000000, set_regs=True, dual_buff=False) #OV7740  Loop Time :155ms, run fps:6.451613
    sensor.reset(freq=20000000, set_regs=True,
                 dual_buff=True)  #OV7740  Loop Time :91ms, run fps:10.98901
    #sensor.reset(freq=20000000)
else:
    sensor.reset()  # OV2640 Reset and initialize the sensor. It will
    # run automatically, call sensor.run(0) to stop
#sensor.shutdown(enable)
sensor.set_pixformat(
    sensor.RGB565)  # Set pixel format to RGB565 (or GRAYSCALE)
#sensor.set_framesize(sensor.QVGA)   # Set frame size to QVGA (320x240)
#sensor.set_auto_whitebal(True)  #OV2640
#sensor.set_hmirror(1)#for unit V
#sensor.set_vflip(1)#for unit V
sensor.set_framesize(sensor.VGA)
img_w = sensor.width()
img_h = sensor.height()
sensor_ID = sensor.get_id()
    def start_face_rendering(self):
        sensor.reset() # Initialize the camera sensor.
        sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
        sensor.set_framesize(sensor.B128X128) # or sensor.QQVGA (or others)
        sensor.set_windowing((92,112))
        sensor.skip_frames(10) # Let new settings take effect.
        sensor.skip_frames(time = 5000) # wait 5 s
        s3 = Servo(3) # servo on position 1 (P7)
        # assign the blue LED to the variable led
        led = pyb.LED(3) # Red LED = 1, Green LED = 2, Blue LED = 3, IR LEDs = 4.
        #SUB = "s1"
        NUM_SUBJECTS = 4 # number of different subjects in the image library
        NUM_SUBJECTS_IMGS = 17 # number of sample images per subject
        # Capture the current face.
        img = sensor.snapshot()
        #img = image.Image("singtown/%s/1.pgm"%(SUB))
        d0 = img.find_lbp((0, 0, img.width(), img.height()))
        # d0 is the LBP descriptor of the current face
        img = None
        pmin = 999999
        self.num=0

        for s in range(1, NUM_SUBJECTS+1):
            dist = 0
            for i in range(2, NUM_SUBJECTS_IMGS+1):
                img = image.Image("singtown/s%d/%d.pgm"%(s, i))
                d1 = img.find_lbp((0, 0, img.width(), img.height()))
                # d1 is the LBP descriptor of image i in folder s
                dist += image.match_descriptor(d0, d1)  # descriptor distance between the sample image and the detected face
            print("Average dist for subject %d: %d"%(s, dist/NUM_SUBJECTS_IMGS))
            pmin = self.min(pmin, dist/NUM_SUBJECTS_IMGS, s)  # the smaller the distance, the better the match
            print(pmin)

        print(self.num) # num is the index of the best-matching subject
        # TS == 3 means no mask worn
        if (pmin>5000) & (TS==3):
            uart.write("-- NO People! --")
            led.off()
        if (pmin>5000) & (TS==1):

            uart.write("-- NO People! --")
            led.off()
        if pmin<=5000:
            if self.num==1:     # matched people_One
                    uart.write("People One      ")
            if self.num==2:
                    uart.write("People Two      ")
            if self.num==3:
                    uart.write("People Three    ")
            if self.num==4:
                    uart.write("People New      ")
            led.on()            # LED on
            led1.off()
            time.sleep(3500)     # delay
            led.off()
            for i in range(1,460):
                s3.speed(50) # for continuous rotation servos
                time.sleep(15)
            s3.speed(0)
            time.sleep(1500)
            for i in range(1,230):
                s3.speed(-50)
                time.sleep(15)
            s3.speed(0)
Example #20
import sensor, image, time, pyb
from pyb import UART
uart = UART(3, 57600)  #timeout_char =10
from pyb import Pin
p8_pin = pyb.Pin.board.P8
p8_pin.init(Pin.IN, Pin.PULL_UP)

sensor.reset()  # initialize the camera
sensor.set_pixformat(sensor.RGB565)  # RGB565 format.
sensor.set_framesize(sensor.QQVGA)  # QQVGA is a bit faster
sensor.set_auto_whitebal(False)
clock = time.clock()  # track FPS
a = 1
position_X = 80
position_Y = 60
new_point_ready = 0
Has_dected_piont = 0
detect_mode = 1  # 1: black, 0: color


def led_blink(x):
    led = pyb.LED(x)
    led.on()
    time.sleep(5)
    led.off()


def send_position():
    if (new_point_ready):
        uart.writechar(0xFF)
        uart.writechar(position_X)
Example #21
import sensor, image, time, lcd
import KPU as kpu
from fpioa_manager import fm
from machine import I2C
from board import board_info
from Maix import GPIO
i2c = I2C(I2C.I2C0, freq=100000, scl=35, sda=34)  # set up I2C

#sensor.reset(dual_buff=True)  # faster camera, uses more RAM
sensor.reset()  # normal camera
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
#sensor.skip_frames(time = 2000)
sensor.set_vflip(1)
sensor.run(1)

clock = time.clock()
toss1 = 0

lcd.init(type=2, freq=20000000, color=lcd.BLACK)  # configure the LCD

#task = kpu.load(0x400000)  # to load the model from Flash
task = kpu.load("/sd/t6.kmodel")  # to load the model from the SD card
##datazzz = bytes([int(128),int(128),int(0)])
try:
    while (True):
        toss = toss1
        toss1 = time.ticks_ms()
        #print(1000/(toss1-toss))
        #img = image.Image("/sd/dataset/55.jpg")
        img = sensor.snapshot()
Example #22
def def_camInit():
    sensor.reset()
    sensor.set_pixformat(sensor.RGB565)
    sensor.set_framesize(sensor.QVGA)
    sensor.skip_frames(time = 2000)
    img = sensor.snapshot()
Example #23
class Maix_dock_device:

    TOF10120_addr = 0x52
    VL53L0X_addr = 0x29
    memaddr = 0xc2
    nbytes = 1

    VL53L0X_REG_IDENTIFICATION_MODEL_ID = 0xc0
    VL53L0X_REG_IDENTIFICATION_REVISION_ID = 0xc2
    VL53L0X_REG_RESULT_INTERRUPT_STATUS = 0x13
    VL53L0X_REG_RESULT_RANGE_STATUS = 0x14
    VL53L0X_REG_SYSRANGE_START = 0x00
    pcf8591_addr = 0x48
    #pcf8591_addr    =   0x4f

    fm.register(1, fm.fpioa.GPIO0)
    fm.register(2, fm.fpioa.GPIO1)
    fm.register(3, fm.fpioa.GPIO2)
    fm.register(13, fm.fpioa.GPIO3)
    fm.register(14, fm.fpioa.GPIO4)
    fm.register(15, fm.fpioa.GPIOHS1)
    fm.register(17, fm.fpioa.GPIOHS2)

    UVC = GPIO(GPIO.GPIO0, GPIO.OUT)
    LED_dis = GPIO(GPIO.GPIO1, GPIO.OUT)
    BEE = GPIO(GPIO.GPIO2, GPIO.OUT)
    TEMP_1 = GPIO(GPIO.GPIO3, GPIO.OUT)
    TEMP_2 = GPIO(GPIO.GPIO4, GPIO.OUT)
    KEY_start = GPIO(GPIO.GPIOHS1, GPIO.IN, GPIO.IRQ_FALLING)
    #key = GPIO(GPIO.GPIOHS0, GPIO.IN, GPIO.PULL_NONE)
    KEY_lock = GPIO(GPIO.GPIOHS2, GPIO.IN, GPIO.PULL_NONE)

    tempBorder_times = 0

    lcd.init()
    sensor.reset()
    sensor.set_pixformat(sensor.RGB565)
    sensor.set_framesize(sensor.QVGA)
    sensor.run(1)
    task = kpu.load(
        0x300000
    )  # you need to put the model (face.kfpkg) in flash at address 0x300000
    # task = kpu.load("/sd/face.kmodel")
    anchor = (1.889, 2.5245, 2.9465, 3.94056, 3.99987, 5.3658, 5.155437,
              6.92275, 6.718375, 9.01025)
    a = kpu.init_yolo2(task, 0.5, 0.3, 5, anchor)

    i2c = I2C(I2C.I2C0, mode=I2C.MODE_MASTER, freq=100000, scl=9,
              sda=10)  # software i2c
    i2c_extend = I2C(I2C.I2C1,
                     mode=I2C.MODE_MASTER,
                     freq=100000,
                     scl=11,
                     sda=12)  # software i2c

    def __init__(self):
        print("Maix_dock_device class")
        self.infrared_range_left = 0
        self.infrared_range_right = 0
        self.temperature_sensor_left = 0
        self.temperature_sensor_right = 0
        self.ad48v = 0
        self.face_detection = 0
        self.readAdd_times = 0

    def set_face_detection(self):
        img = sensor.snapshot()
        code = kpu.run_yolo2(self.task, img)
        if code:
            for i in code:
                #print(i)
                #a = img.draw_rectangle(i.rect())
                print("find face")
                self.face_detection = 1
        else:
            print("no face")
            self.face_detection = 0

    def get_face_detection(self):
        return self.face_detection

    def get_infrared_range_left(self):
        return self.infrared_range_left

    def get_infrared_range_right(self):
        return self.infrared_range_right

    def get_ad48v(self):
        return self.ad48v

    def get_temperature_sensor_left(self):
        return self.temperature_sensor_left

    def get_temperature_sensor_right(self):
        return self.temperature_sensor_right

    def set_UVC_OUT(self, value):
        self.UVC.value(value)

    def set_led(self, value):
        self.LED_dis.value(value)

    def get_temperature_sensor_left(self):
        return 20.1

    def get_keyValue_start(self):
        if (self.KEY_start.value() == 1):
            time.sleep_ms(10)
            if (self.KEY_start.value() == 1):
                #print("KEY_start close")
                return 1
        else:
            #print("KEY_start open")
            return 0

    def show_deviceAddr(self):
        devices = self.i2c.scan()
        print(devices)
        devices2 = self.i2c_extend.scan()
        print(devices2)

    def set_infrared_range_left(self):
        try:
            self.i2c.writeto_mem(0x29, 0x00, chr(1))
            time.sleep_ms(10)
            #time.sleep_ms(40)
            data_i2c = self.i2c.readfrom_mem(0x29, 0x1e, 2)
            data_i2c = data_i2c[0] << 8 | data_i2c[1]
            if data_i2c != 20:
                self.infrared_range_left = data_i2c
                return data_i2c
            else:
                print("data_i2c == 20")
                return None
        except OSError as err:
            if err.args[0] == errno.EIO:
                print("i2c1 dis errno.EIO")
                return None
        else:
            print("i2c1 abnormal")
            return None

    def set_infrared_range_right(self):
        try:
            self.i2c_extend.writeto_mem(0x29, 0x00, chr(1))
            time.sleep_ms(10)
            #time.sleep_ms(40)
            data_i2c = self.i2c_extend.readfrom_mem(0x29, 0x1e, 2)
            data_i2c = data_i2c[0] << 8 | data_i2c[1]
            if data_i2c != 20:
                self.infrared_range_right = data_i2c
                return data_i2c
            else:
                print("data_i2c == 20")
                return None
        except OSError as err:
            if err.args[0] == errno.EIO:
                print("i2c2 errno.EIO")
                return None
        else:
            print("i2c2 abnormal")
            return None

    def set_ad48v_chl(self, chn):
        try:
            if chn == 0:
                self.i2c.writeto(self.pcf8591_addr, chr(0x40))
            if chn == 1:
                self.i2c.writeto(self.pcf8591_addr, chr(0x41))
            if chn == 2:
                self.i2c.writeto(self.pcf8591_addr, chr(0x42))
            if chn == 3:
                self.i2c.writeto(self.pcf8591_addr, chr(0x43))
            self.i2c.readfrom(self.pcf8591_addr, 1)
            ad_value = self.i2c.readfrom(self.pcf8591_addr, 1)
            ad_value = ad_value[0] * 58 / 255
            self.ad48v = ad_value - 1.2
            return ad_value
        except OSError as err:
            if err.args[0] == errno.EIO:
                print("i2c1 ad errno.EIO")

    def set_ad48v(self):
        self.set_ad48v_chl(0)

    def set_tof10120_left(self):
        try:
            data_i2c = self.i2c.readfrom_mem(self.TOF10120_addr, 0x00, 2)
            dis_left = data_i2c[0] * 256 + data_i2c[1]
            print("dis_left", '%d' % dis_left)
        except OSError as err:
            if err.args[0] == errno.EIO:
                print("i2c1 tof10120_left errno.EIO")

    def set_tof10120_right(self):
        try:
            data_i2c = self.i2c.readfrom_mem(self.TOF10120_addr + 1, 0x00, 2)
            dis_right = data_i2c[0] * 256 + data_i2c[1]
            print("dis_left", '%d' % dis_right)
        except OSError as err:
            if err.args[0] == errno.EIO:
                print("i2c1 tof10120_right errno.EIO")

    def uvc_autoControl(self):
        tempBorder_times = 0
        if self.readAdd_times == 0:
            self.readAdd_times += 1
            self.show_deviceAddr()
        self.set_infrared_range_left()
        utime.sleep_ms(200)
        self.set_ad48v()
        utime.sleep_ms(200)
        self.set_infrared_range_right()
        utime.sleep_ms(200)
        self.set_face_detection()
        if self.get_keyValue_start():
            if ((self.get_face_detection() == 0) and (self.get_ad48v() > 36)):
                if ((tempBorder_times <= 3)
                        and ((self.get_infrared_range_left() < 500) or
                             (self.get_infrared_range_right() < 500))):
                    self.set_UVC_OUT(1)
                    self.set_led(1)
                    print("uvc_out: 1")
                else:
                    self.set_UVC_OUT(0)
                    self.set_led(0)
                    print("uvc_out: 0")
            print("key_start_down!")
        else:
            print("key_start_up!")
            self.set_UVC_OUT(0)
            self.set_led(0)

        print("infrared_range_left  = ", self.get_infrared_range_left())
        print("infrared_range_right = ", self.get_infrared_range_right())
        print("ad48v = ", self.get_ad48v())
        print("face_detection = ", self.get_face_detection())
Example #24
# Hello World Example
#
# Welcome to the MaixPy IDE!
# 1. Connect the board to the computer
# 2. Select board at the top of MaixPy IDE: `tools->Select Board`
# 3. Click the connect button below to connect the board
# 4. Click on the green run arrow button below to run the script!

import sensor, image, time, lcd

lcd.init(freq=15000000)
sensor.reset()  # Reset and initialize the sensor. It will
# run automatically, call sensor.run(0) to stop
sensor.set_pixformat(
    sensor.GRAYSCALE)  # Set pixel format to GRAYSCALE (or RGB565)
sensor.set_framesize(sensor.QVGA)  # Set frame size to QVGA (320x240)
sensor.skip_frames(time=2000)  # Wait for settings to take effect.
# Set the camera brightness
sensor.set_brightness(-3)
# Camera auto gain
#sensor.set_auto_gain(False,0)
# Disable auto exposure and set a fixed exposure time
sensor.set_auto_exposure(False, 15000)
clock = time.clock()  # Create a clock object to track the FPS.

green_threshold = (76, 96, -110, -30, 8, 66)
size_threshold = 2000


def find_max(blobs):
    max_size = 0
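    # Hedged completion (added): the rest of this helper is cut off in the
    # source; a common version returns the blob with the most pixels:
    max_blob = None
    for blob in blobs:
        if blob.pixels() > max_size:
            max_size = blob.pixels()
            max_blob = blob
    return max_blob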
import sensor
import image
import time
import ustruct as struct
from pyb import UART

# LAB color-space threshold for the red ball (L Min, L Max, A Min, A Max, B Min, B Max)
RED_BALL_THRESHOLD = (30, 74, 38, 85, -21, 62)
# ROI search radius
ROI_R = 10

# UART init
uart = UART(3, 115200)

# OpenMV sensor init
sensor.reset()  # reset the sensor
sensor.set_pixformat(sensor.RGB565)  # set pixel format to RGB565
sensor.set_framesize(sensor.QVGA)  # set resolution to QVGA (320x240)
sensor.set_vflip(True)  # vertical flip
sensor.skip_frames(time=2000)  # skip frames for 2 s while the image stabilizes
sensor.set_auto_gain(False)  # disable auto gain
sensor.set_auto_whitebal(False)  # disable auto white balance

# Init the clock
clock = time.clock()

last_roi = None
while (True):
    clock.tick()  # start timing
    img = sensor.snapshot()  # take a picture
    blobs = []
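    # Hedged continuation (added): the example is truncated here. One
    # plausible use of last_roi and ROI_R, given the comments above, is to
    # search only near the previous detection and fall back to the full
    # frame otherwise; the pixel/area thresholds are assumptions.
    if last_roi:
        x, y, w, h = last_roi
        x = max(0, x - ROI_R)
        y = max(0, y - ROI_R)
        w = min(img.width() - x, w + 2 * ROI_R)
        h = min(img.height() - y, h + 2 * ROI_R)
        blobs = img.find_blobs([RED_BALL_THRESHOLD], roi=(x, y, w, h),
                               pixels_threshold=10, area_threshold=10)
    if not blobs:
        blobs = img.find_blobs([RED_BALL_THRESHOLD],
                               pixels_threshold=10, area_threshold=10)
    if blobs:
        ball = max(blobs, key=lambda b: b.pixels())
        img.draw_rectangle(ball.rect())
        img.draw_cross(ball.cx(), ball.cy())
        last_roi = ball.rect()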
Example #26
# Selective Search Example

import sensor, image, time
from random import randint

sensor.reset()  # Reset and initialize the sensor.
sensor.set_pixformat(
    sensor.GRAYSCALE)  # Set pixel format to GRAYSCALE (or RGB565)
sensor.set_framesize(sensor.QVGA)  # Set frame size to QVGA (320x240)
sensor.skip_frames(time=2000)  # Wait for settings to take effect.
sensor.set_auto_gain(False)
sensor.set_auto_exposure(False, exposure_us=10000)
clock = time.clock()  # Create a clock object to track the FPS.

while (True):
    clock.tick()  # Update the FPS clock.
    img = sensor.snapshot()  # Take a picture and return the image.
    rois = img.selective_search(threshold=200, size=20, a1=0.5, a2=1.0, a3=1.0)
    for r in rois:
        img.draw_rectangle(r, color=(255, 0, 0))
        #img.draw_rectangle(r, color=(randint(100, 255), randint(100, 255), randint(100, 255)))
    print(clock.fps())
Example #27
def init_sensor():
    sensor.reset(freq=22000000)
    sensor.set_pixformat(sensor.RGB565)
    sensor.set_framesize(sensor.QQVGA)
Example #28
# Log Polar Mapping Example
#
# This example shows off re-projecting the image using a log polar
# transformation. Log polar images are useful in that rotations
# become translations in the X direction and exponential changes
# in scale (x2, x4, etc.) become linear translations in the Y direction.

import sensor, image, time

sensor.reset()  # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565)  # or sensor.GRAYSCALE
sensor.set_framesize(sensor.QQVGA)  # or sensor.QVGA (or others)
sensor.skip_frames(time=2000)  # Let new settings take effect.
clock = time.clock()  # Tracks FPS.

while (True):
    clock.tick()  # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot().logpolar(reverse=False)

    print(clock.fps())  # Note: Your OpenMV Cam runs about half as fast while
    # connected to your computer. The FPS should increase once disconnected.
Example #29
import sensor, image, time, math, pyb, json, single_blob, find_line
from pyb import LED, UART, Timer

threshold_index = 0  # 0 for red, 1 for green, 2 for blue

# Color Tracking Thresholds (L Min, L Max, A Min, A Max, B Min, B Max)
thresholds = [
    (21, 84, 22, 90, -18, 58),  # orange-red blobs
    (30, 100, -64, -8, -32, 32),  # generic_green_thresholds
    (0, 30, 0, 64, -128, 0)
]  # generic_blue_thresholds

sensor.reset()  # sensor reset
sensor.set_pixformat(
    sensor.RGB565
)  # RGB565: each color pixel packs R, G and B into 2 bytes: 5 high bits R, 6 middle bits G, 5 low bits B
sensor.set_framesize(sensor.QQVGA)  # QQVGA (160x120)
sensor.skip_frames(time=500)  # skip frames while the camera stabilizes
sensor.set_auto_gain(False)  # auto gain is usually disabled for color tracking, otherwise it shifts the thresholds
sensor.set_auto_whitebal(False)  # white balance is usually disabled for color tracking, otherwise it shifts the thresholds
clock = time.clock()  # create a clock object

uart = UART(3, 115200)
uart.init(115200, bits=8, parity=None, stop=1,
          timeout_char=1000)  # init with the given parameters; timeout_char is the inter-character timeout in ms


class ctrl_info(object):
    WorkMode = 0x03  # detection mode: 0x01 fixed single-color, 0x02 learned color, 0x03 line following
    Threshold_index = 0x00  # threshold index
Example #30
fm.register(board_info.LED_B, fm.fpioa.GPIO6)
led_b = GPIO(GPIO.GPIO6, GPIO.OUT)
led_b.value(1) #RGBW LEDs are Active Low


time.sleep(0.5) # Delay for a moment to see the start-up screen :p

import sensor
import KPU as kpu

err_counter = 0

while 1:
    try:
        sensor.reset() # Resetting the sensor may fail, so retry a few times
        break
    except:
        err_counter = err_counter + 1
        if err_counter == 20:
            lcd.draw_string(lcd.width()//2-100,lcd.height()//2-4, "Error: Sensor Init Failed", lcd.WHITE, lcd.RED)
        time.sleep(0.1)
        continue

sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA) #QVGA=320x240
sensor.run(1)

task = kpu.load(0x300000) # Load Model File from Flash
anchor = (1.889, 2.5245, 2.9465, 3.94056, 3.99987, 5.3658, 5.155437, 6.92275, 6.718375, 9.01025)
# Anchor data is for bbox, extracted from the training sets.
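# Hedged continuation (added): the example is truncated here. With the model
# and anchors above, the usual next steps are to initialize yolo2 and run a
# detection loop; class names are not shown in this snippet, so only the
# bounding boxes are drawn, and lcd is assumed to be initialized in the part
# that was cut off.
kpu.init_yolo2(task, 0.5, 0.3, 5, anchor)
while True:
    img = sensor.snapshot()
    objects = kpu.run_yolo2(task, img)
    if objects:
        for obj in objects:
            img.draw_rectangle(obj.rect())
    lcd.display(img)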
# Blob Detection Example
#
# This example shows how to use find_blobs() to find color blobs in an image. It looks specifically for dark green objects.

import sensor, image, time

# For color tracking to work well you should be in a very controlled lighting environment.
green_threshold = (21, 51, 30, 72, 6, 63)
# Green threshold: the tuple holds the min and max L, A and B values
# (minL, maxL, minA, maxA, minB, maxB); pick the LAB values from the three
# histograms to the left of the image. For a grayscale image only (min, max) is needed.

# You may need to tune the threshold above to track green things...
# Select an area in the Framebuffer to copy the color settings from.

sensor.reset()  # init the sensor

sensor.set_pixformat(sensor.RGB565)  # use RGB565.
# Set the image color format: RGB565 color or GRAYSCALE grayscale.

sensor.set_framesize(sensor.QQVGA)  # use QQVGA for speed.
# Set the image size.

sensor.skip_frames(10)  # let the new settings take effect.
sensor.set_auto_whitebal(False)  # turn this off.
# Turn off white balance. It is on by default and must be off for color tracking.
clock = time.clock()  # track FPS

while (True):
    clock.tick()  # track elapsed milliseconds between snapshots().
    img = sensor.snapshot()  # take a picture and return the image.
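    # Hedged continuation (added): the example stops here in the source. The
    # usual next step is to find and mark the green blobs:
    blobs = img.find_blobs([green_threshold])
    for b in blobs:
        img.draw_rectangle(b.rect())    # box each green blob
        img.draw_cross(b.cx(), b.cy())  # mark its centroid
    print(clock.fps())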
# Basic Frame Differencing Example
#
# Note: You will need an SD card to run this example.
#
# This example demonstrates using frame differencing with your OpenMV Cam. It's
# called basic frame differencing because there's no background image update.
# So, as time passes the background image may change resulting in issues.

import sensor, image, pyb, os, time

sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
sensor.skip_frames(time = 2000) # Let new settings take effect.
sensor.set_auto_whitebal(False) # Turn off white balance.
clock = time.clock() # Tracks FPS.

if not "temp" in os.listdir(): os.mkdir("temp") # Make a temp directory

print("About to save background image...")
sensor.skip_frames(time = 2000) # Give the user time to get ready.
sensor.snapshot().save("temp/bg.bmp")
print("Saved background image - Now frame differencing!")

while(True):
    clock.tick() # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot() # Take a picture and return the image.

    # Replace the image with the "abs(NEW-OLD)" frame difference.
    img.difference("temp/bg.bmp")
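    # Hedged continuation (added): the source cuts off here; the official
    # example simply reports the frame rate after differencing:
    print(clock.fps())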
Example #33
key_gpio = GPIO(GPIO.GPIOHS0, GPIO.IN, GPIO.PULL_UP)
start_processing = False
BOUNCE_PROTECTION = 50


def set_key_state(*_):
    global start_processing
    start_processing = True
    utime.sleep_ms(BOUNCE_PROTECTION)


key_gpio.irq(set_key_state, GPIO.IRQ_RISING, GPIO.WAKEUP_NOT_SUPPORT)

lcd.init()  # init the LCD
lcd.rotation(2)
sensor.reset()  # init the camera sensor
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.set_hmirror(1)  # horizontal mirror
sensor.set_vflip(1)  # vertical flip
sensor.run(1)  # enable the camera
anchor = (1.889, 2.5245, 2.9465, 3.94056, 3.99987, 5.3658, 5.155437, 6.92275,
          6.718375, 9.01025)  # anchors for face detection
dst_point = [
    (44, 59), (84, 59), (64, 82), (47, 105), (81, 105)
]  # standard frontal-face 5 key point positions: left eye, right eye, nose, left mouth corner, right mouth corner
a = kpu.init_yolo2(task_fd, 0.5, 0.3, 5, anchor)  # init the face-detection model
img_lcd = image.Image()  # display buffer
img_face = image.Image(size=(128, 128))  # 128x128 face image buffer
a = img_face.pix_to_ai()  # convert the image to the format the KPU expects
Example #34
# Single Color Code Tracking Example
#
# This example shows off single color code tracking using the OpenMV Cam.
#
# A color code is a blob composed of two or more colors. The example below will
# only track colored objects which have both the colors below in them.

import sensor, image, time, math

# Color Tracking Thresholds (L Min, L Max, A Min, A Max, B Min, B Max)
# The below thresholds track in general red/green things. You may wish to tune them...
thresholds = [(30, 100, 15, 127, 15, 127), # generic_red_thresholds -> index is 0 so code == (1 << 0)
              (30, 100, -64, -8, -32, 32)] # generic_green_thresholds -> index is 1 so code == (1 << 1)
# Codes are or'ed together when "merge=True" for "find_blobs".

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.skip_frames(time = 2000)
sensor.set_auto_gain(False) # must be turned off for color tracking
sensor.set_auto_whitebal(False) # must be turned off for color tracking
clock = time.clock()

# Only blobs that with more pixels than "pixel_threshold" and more area than "area_threshold" are
# returned by "find_blobs" below. Change "pixels_threshold" and "area_threshold" if you change the
# camera resolution. "merge=True" must be set to merge overlapping color blobs for color codes.

while(True):
    clock.tick()
    img = sensor.snapshot()
    for blob in img.find_blobs(thresholds, pixels_threshold=100, area_threshold=100, merge=True):
Example #35
def findCubeCenter():
    """
    find the cube roi position
    """
    global roi
    sensor.set_auto_whitebal(False)
    sensor.set_contrast(2)

    cnt = 1
    LAB_THRESHOLD = (
        (0, 60, -40, 50, -40, 10),  # blue
        (0, 40, -50, 40, -60, 30),  # yellow orange red white
        (0, 50, -40, 15, -25, 70))
    #(0, 70, -25, 15, -60, 30)) # green

    CENTER_THRESHOLD = roi[2] / 3 / 2
    gain = 0
    while (True):

        if cnt > 12:
            sensor.set_auto_gain(gain)

        img = sensor.snapshot()

        if cnt % 60 == 0:
            cnt = 1
            gain += 10

        if (int(cnt / 24)) % 2 == 1:
            lab_threshold = LAB_THRESHOLD[int(cnt / 12) - 2]
            img = img.binary([lab_threshold])
            img = img.dilate(2)
            img = img.erode(2)

        lcd.display(img)

        center_roi = list(
            map(int, [
                roi[0] + roi[2] / 2 - CENTER_THRESHOLD * 2,
                roi[1] + roi[3] / 2 - CENTER_THRESHOLD * 2,
                CENTER_THRESHOLD * 4, CENTER_THRESHOLD * 4
            ]))
        squares = []
        for r in img.find_rects(roi=center_roi, threshold=500):
            if (isSquare(r)):
                squares.append(r)
                img = img.draw_rectangle(r.rect())
                for p in r.corners():
                    img = img.draw_circle(p[0], p[1], 5, color=(0, 255, 0))
                lcd.display(img)
                #time.sleep_ms(5000)
        if not squares:
            cnt += 1
            print(cnt)
        else:
            roi = findCenter(squares, roi, CENTER_THRESHOLD * math.sqrt(2))
            center_roi = list(
                map(int, [
                    roi[0] + roi[2] / 2 - CENTER_THRESHOLD * 2,
                    roi[1] + roi[3] / 2 - CENTER_THRESHOLD * 2,
                    CENTER_THRESHOLD * 4, CENTER_THRESHOLD * 4
                ]))
            img = img.draw_rectangle(center_roi)
            img = img.draw_rectangle(roi)

            lcd.display(img)

            sensor.reset()
            sensor.set_pixformat(sensor.RGB565)
            sensor.set_framesize(sensor.QQVGA)

            sensor.set_auto_whitebal(False)
            sensor.skip_frames(time=60)
            gain = sensor.get_gain_db()
            sensor.set_auto_gain(0, gain)
            sensor.skip_frames(time=60)
            sensor.set_auto_exposure(0, 80000)

            sensor.skip_frames(time=60)
            return 1