Code Example #1
# IR Beacon RGB565 Tracking Example
#
# This example shows off IR beacon RGB565 tracking using the OpenMV Cam.

import sensor, image, time

thresholds = (100, 100, 0, 0, 0, 0) # thresholds for bright white light from IR.

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.VGA)
sensor.set_windowing((240, 240)) # 240x240 center pixels of VGA
sensor.skip_frames(time = 2000)
sensor.set_auto_gain(False) # must be turned off for color tracking
sensor.set_auto_whitebal(False) # must be turned off for color tracking
clock = time.clock()

# Only blobs with more pixels than "pixels_threshold" and more area than "area_threshold" are
# returned by "find_blobs" below. Change "pixels_threshold" and "area_threshold" if you change
# the camera resolution. "merge=True" merges all overlapping blobs in the image.

while(True):
    clock.tick()
    img = sensor.snapshot()
    for blob in img.find_blobs([thresholds], pixels_threshold=200, area_threshold=200, merge=True):
        ratio = blob.w() / blob.h()
        if (ratio >= 0.5) and (ratio <= 1.5): # filter out non-squarish blobs
            img.draw_rectangle(blob.rect())
            img.draw_cross(blob.cx(), blob.cy())
    print(clock.fps())
Code Example #2
import sensor, image, time, pyb

sensor.reset()
sensor.set_framesize(sensor.QQVGA)
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_windowing((120, 120))
sensor.skip_frames(time=2000)

clock = time.clock()
led2 = pyb.LED(2)

min_degreeS = 95
max_degreeS = 112


def buscarDiagonales():  # search for diagonal line segments (uses the global img from the main loop)
    dL = 0
    for l in img.find_line_segments(merge_distance=40, max_theta_diff=10):
        if (min_degreeS <= l.theta() <= max_degreeS
                and 30 <= l.length() <= 80):
            #img.draw_line(l.line(), color = (0, 0, 0), thickness = 4)
            #print(l)
            dL += 1
        if (dL > 0):
            return True
    return False


def buscarCirculos():
    ci = 0
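    # Continuation (sketch): the original is truncated here. Mirroring
    # buscarDiagonales, this counts circles returned by find_circles(); the
    # threshold and radius limits below are illustrative assumptions, not
    # values from the original.
    for c in img.find_circles(threshold=2000, x_margin=10, y_margin=10,
                              r_margin=10, r_min=5, r_max=40, r_step=2):
        ci += 1
        if (ci > 0):
            return True
    return False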
Code Example #3
# import the lcd optionally
# to use the LCD, uncomment Lines 9, 24, 33, and 100
# and comment Lines 19 and 20 
#import lcd

import sensor, time

# reset the camera
sensor.reset()

# sensor settings
sensor.set_pixformat(sensor.GRAYSCALE)

# non LCD settings
# comment the following lines if you are using the LCD
sensor.set_framesize(sensor.VGA)
sensor.set_windowing((640, 240))

# LCD settings
# uncomment this line to use the LCD with valid resolution
#sensor.set_framesize(sensor.QQVGA2)

# additional sensor settings
sensor.skip_frames(2000)
sensor.set_auto_gain(False)
sensor.set_auto_whitebal(False)

# initialize the LCD
# uncomment if you are using the LCD
#lcd.init()

# initialize the clock
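clock = time.clock()

# A minimal main loop (sketch), since the original file is truncated here; the
# lcd.display call is the LCD path described in the comments above.
while(True):
    clock.tick()
    img = sensor.snapshot()
    #lcd.display(img) # uncomment if you are using the LCD
    print(clock.fps())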
Code Example #4
File: tracking.py  Project: yankun34/OpenMV-Examples
    uart.write("G0 X250 Y0 Z")
    uart.write(str(start_z) + " F" + str(INIT_ARM_SPEED) + "\r\n")
    if PRINT_ARM_COMM:
        print("TX:" + "G0 X250 Y0 Z" + str(start_z) + " F" + str(INIT_ARM_SPEED) + "\r\n")

#finish the initialization
led_start.off()

# Reset sensor
sensor.reset()

# Sensor settings
sensor.set_contrast(3)
sensor.set_gainceiling(16)
sensor.set_framesize(sensor.VGA)
sensor.set_windowing((IMG_WIDTH, IMG_HEIGHT))
sensor.set_pixformat(sensor.GRAYSCALE)

sensor.skip_frames(time = 200)
sensor.set_auto_gain(False, value=100)

def draw_keypoints(img, kpts):
    #print(kpts)
    #img.draw_keypoints(kpts)
    img = sensor.snapshot()
    time.sleep(1000)

time_to_wait = INIT_WAIT

kpts1 = None
# NOTE: uncomment to load a keypoints descriptor from file
Code Example #5
# Note: This script does not detect a face first, use it with the telephoto lens.
# The camera must be focused on the eye's pupil for it to be captured.
import sensor, time, image

# Reset sensor
sensor.reset()

# Sensor settings
sensor.set_contrast(3)
sensor.set_gainceiling(16)

# Set resolution to VGA.
sensor.set_framesize(sensor.VGA)

# Bin/Crop image to 200x100, which gives more details with less data to process
sensor.set_windowing((220, 190, 200, 100))

sensor.set_pixformat(sensor.GRAYSCALE)

# Load Haar Cascade
# By default this will use all stages, lower stages is faster but less accurate.
eyes_cascade = image.HaarCascade("eye", stages=24)
print(eyes_cascade)

# FPS clock
clock = time.clock()

while (True):
    clock.tick()
    # Capture snapshot
    img = sensor.snapshot()
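    # Continuation (sketch): the original is truncated here. The stock OpenMV
    # iris-detection pattern finds eyes with the Haar cascade, then locates the
    # pupil inside each eye ROI with find_eye():
    eyes = img.find_features(eyes_cascade, threshold=0.5, scale_factor=1.5)
    for e in eyes:
        iris = img.find_eye(e)  # (x, y) of the iris within the eye ROI
        img.draw_rectangle(e)
        img.draw_cross(iris[0], iris[1])
    print(clock.fps())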
Code Example #6
# Find Data Matrices w/ Lens Zoom Example
#
# This example shows off how easy it is to detect Data Matrices using the
# OpenMV Cam M7. Data Matrix detection does not work on the M4 camera.

import sensor, image, time, math

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.VGA)
sensor.set_windowing((320, 240)) # 2x Zoom
sensor.skip_frames(time = 2000)
sensor.set_auto_gain(False)  # must turn this off to prevent image washout...
sensor.set_auto_whitebal(False)  # must turn this off to prevent image washout...
clock = time.clock()

while(True):
    clock.tick()
    img = sensor.snapshot()

    matrices = img.find_datamatrices()
    for matrix in matrices:
        img.draw_rectangle(matrix.rect(), color = (255, 0, 0))
        print_args = (matrix.rows(), matrix.columns(), matrix.payload(), (180 * matrix.rotation()) / math.pi, clock.fps())
        print("Matrix [%d:%d], Payload \"%s\", rotation %f (degrees), FPS %f" % print_args)
    if not matrices:
        print("FPS %f" % clock.fps())
Code Example #7
# AprilTags Max Res Example
#
# This example shows the power of the OpenMV Cam to detect April Tags
# on the OpenMV Cam M7. The M4 versions cannot detect April Tags.

import sensor, image, time, math, omv

sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.VGA) # we run out of memory if the resolution is much bigger...
# AprilTags works on images of at most 64K pixels.
if omv.board_type() == "H7": sensor.set_windowing((240, 240))
elif omv.board_type() == "M7": sensor.set_windowing((200, 200))
else: raise Exception("You need a more powerful OpenMV Cam to run this script")
sensor.skip_frames(time = 2000)
sensor.set_auto_gain(False)  # must turn this off to prevent image washout...
sensor.set_auto_whitebal(False)  # must turn this off to prevent image washout...
clock = time.clock()

# Note! Unlike find_qrcodes the find_apriltags method does not need lens correction on the image to work.

# The apriltag code supports up to 6 tag families which can be processed at the same time.
# Returned tag objects will have their tag family and id within the tag family.

tag_families = 0
tag_families |= image.TAG16H5 # comment out to disable this family
tag_families |= image.TAG25H7 # comment out to disable this family
tag_families |= image.TAG25H9 # comment out to disable this family
tag_families |= image.TAG36H10 # comment out to disable this family
tag_families |= image.TAG36H11 # comment out to disable this family (default family)
tag_families |= image.ARTOOLKIT # comment out to disable this family
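
# The original is truncated here; a minimal detection loop (sketch) using the
# families selected above, following the stock AprilTags examples:
while(True):
    clock.tick()
    img = sensor.snapshot()
    for tag in img.find_apriltags(families=tag_families):
        img.draw_rectangle(tag.rect(), color = 127)
        img.draw_cross(tag.cx(), tag.cy(), color = 127)
        print("Tag family 0x%x, id %d" % (tag.family(), tag.id()))
    print(clock.fps())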
Code Example #8
File: boot.py  Project: sipeed/MaixPy_scripts
import sensor, image, lcd

#### image size ####
set_windowing = (224, 224)

#### sensor config ####

sensor.reset(freq=22000000, dual_buff=False)
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)  # 320x240
try:
    sensor.set_jb_quality(95)  # for IDE display quality
except Exception:
    pass  # no IDE support
if set_windowing:
    sensor.set_windowing(set_windowing)
# sensor.set_auto_gain(False)
# sensor.set_auto_whitebal(False, rgb_gain_db=(0x52,0x40,0x4d))
# sensor.set_saturation(0)
# sensor.set_brightness(4)
# sensor.set_contrast(0)
# sensor.set_hmirror(True)        # image horizontal mirror
# sensor.set_vflip(True)          # image vertical flip
# sensor.set_auto_whitebal(False)

sensor.skip_frames()

#### lcd config ####
lcd.init(type=1, freq=15000000)
lcd.rotation(2)
Code Example #9
"""
MicroPython script for Sipeed camera
"""
import sensor, image, lcd, time
import KPU as kpu

lcd.init()
sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.set_windowing((128, 128))
sensor.set_vflip(1)
sensor.run(1)
lcd.clear()

f = open('labels.txt', 'r')
labels = f.readlines()
f.close()

print(kpu.memtest())
latency_result = None
fps_result = None


def inference(model_file):
    task = kpu.load(model_file)
    kpu.set_outputs(task, 0, 1, 1, 2)
    clock = time.clock()
    while (True):
        img = sensor.snapshot()
        clock.tick()
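        # Continuation (sketch): the original is truncated here. A typical MaixPy
        # classification step runs the model and arg-maxes its two outputs:
        fmap = kpu.forward(task, img)
        plist = fmap[:]
        pmax = max(plist)
        max_index = plist.index(pmax)
        lcd.display(img)
        lcd.draw_string(0, 0, "%.2f : %s" % (pmax, labels[max_index].strip()))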
Code Example #10
File: raogangV3.7.py  Project: Jay-Chou118/Leo
                    sensor.set_framesize(sensor.QVGA)
            else:
                led1.on()
                time.sleep_ms(1000)
                led1.off()
                time.sleep_ms(1000)
                #tx_send_data(33,0,18)
                # 11 -> AA 55 18 02 0B 24
                # 22 -> AA 55 18 02 16 2F
                # 33 -> AA 55 18 02 21 3A
                # 44 -> AA 55 18 02 2C 44
                # 55 -> AA 55 18 02 37 50
    while(Work_mode == 11): # mode 11: report the distance to the red pole
        sensor.set_pixformat(sensor.RGB565)
        sensor.set_framesize(sensor.VGA)
        sensor.set_windowing((0,220,639,40))
        clock.tick()
        uart_read_buf(uart)
        now_Work_mode = uart_mode_secelt() # re-read the requested mode; 0 means stay in the current mode
        if now_Work_mode == 0:
            img = sensor.snapshot().histeq(adaptive = True , clip_limit = 3)
            distance_pole(red) # measure the distance and send the data directly
        else:
            Work_mode = now_Work_mode
            led3.off()


    while(Work_mode == 22): # mode 22: report the distance to the blue pole
        sensor.set_pixformat(sensor.RGB565)
        sensor.set_framesize(sensor.VGA)
Code Example #11
# Snapshot Example
#
# Note: You will need an SD card to run this example.
#
# You can use your OpenMV Cam to save image files.

import sensor, image, pyb

RED_LED_PIN = 1
BLUE_LED_PIN = 3

sensor.reset()  # Initialize the camera sensor.
sensor.set_pixformat(sensor.GRAYSCALE)  # or sensor.RGB565
sensor.set_framesize(sensor.B128X128)  # or sensor.QQVGA (or others)
sensor.set_windowing((92, 112))
sensor.skip_frames(10)  # Let new settings take effect.
sensor.skip_frames(time=2000)

num = 1  # Subject number: the first person's images are saved to folder s1, the second to s2, and so on. Update num each time you change subjects.

n = 20  # Number of images to capture per person.

# Capture n photos in a row, one every 3 seconds.
while (n):
    # red LED on
    pyb.LED(RED_LED_PIN).on()
    sensor.skip_frames(
        time=3000)  # Give the user 3 s to get ready.

    # red LED off, blue LED on
    pyb.LED(RED_LED_PIN).off()
Code Example #12
# main.py -- put your code here!
import pyb, time,sensor,image
from pyb import UART

green_led = pyb.LED(2)

green_led.off()                         # turn the LED off
color = (0, 45, -20, 40, -40, 0)        # LAB color-tracking threshold
uart = UART(3, 115200)
uart.init(115200, bits=8, parity=None, stop=1)
sensor.reset()                          # 初始化摄像头
sensor.set_pixformat(sensor.RGB565)     # use RGB565.
sensor.set_framesize(sensor.QQVGA)      # use QQVGA for speed.
sensor.set_windowing((80, 80))
sensor.skip_frames(10)                  # skip 10 frames
sensor.set_auto_gain(False)             # must be turned off for color tracking
sensor.set_auto_whitebal(False)         # White balance is on by default; it must be off for color tracking.
clock = time.clock()                    # track FPS.

while(True):
    clock.tick() # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot() # Take a picture and return the image.

    blobs = img.find_blobs([color])
    #find_blobs(thresholds, invert=False, roi=Auto): "thresholds" is the list of color
    #thresholds and must be wrapped in brackets [ ]. invert=1 inverts the thresholds;
    #the default invert=False does not. "roi" limits detection to a region (x, y, w, h),
    #a rectangle of width w and height h whose top-left corner is at (x, y); if unset it
    #defaults to the whole image. The function returns a list of blobs: [0] is the x of a
    #blob's top-left corner, [1] its y, [2] its width, [3] its height, [4] its pixel
    #count, [5] its center x, [6] its center y,
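    # Continuation (sketch): the original is truncated here. A minimal use of the
    # blob fields described above, reporting each center over the UART:
    if blobs:
        for b in blobs:
            img.draw_cross(b[5], b[6])
            uart.write("%d,%d\r\n" % (b[5], b[6]))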
Code Example #13
File: Main_Twatch.py  Project: wexa/CorgiDude
import struct
from time import sleep_ms, ticks_ms, ticks_diff
import sensor, lcd
import KPU as kpu
from machine import UART
from Maix import GPIO
from fpioa_manager import fm
######## UART for Temperature
fm.register(2, fm.fpioa.UART1_TX)
fm.register(3, fm.fpioa.UART1_RX)
uart_temp = UART(UART.UART1, 115200, 8, None, 1, timeout=1000, read_buf_len=4096)
######## GPIO for triggering the thermometer
fm.register(10, fm.fpioa.GPIO1, force=True)
triger = GPIO(GPIO.GPIO1, GPIO.OUT)
triger.value(0)

######## Config Camera and Display
sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.set_windowing((320, 224))
sensor.set_vflip(1)
sensor.run(1)
lcd.init(type=2, freq=20000000, color=lcd.BLACK)
######### config facemask detection
task = kpu.load(0x400000)
a = kpu.set_outputs(task, 0, 10,7,35)
anchor = (0.212104,0.261834, 0.630488,0.706821, 1.264643,1.396262, 2.360058,2.507915, 4.348460,4.007944)
a = kpu.init_yolo2(task, 0.5, 0.5, 5, anchor)
######### config detection
stage = 0
# 0 = init
# 1 = wait for the user to wear a mask
# 2 = ask the user to approach
# 3 = wait for the user to approach
# 4 = check temperature
Code Example #14
File: apriltag_v3.py  Project: soon14/K210
import sensor
import image
import lcd
import time
from machine import UART, Timer
from fpioa_manager import fm
import struct

clock = time.clock()
lcd.init()
sensor.reset(freq=24000000, set_regs=True, dual_buff=True)
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)

# The input image size can be changed here; 160*120 proved the smoothest in testing.
sensor.set_windowing((224, 160))
# Camera mounting orientation: 1/0 = front or reverse mounting
sensor.set_vflip(1)
sensor.run(1)

# register the UART used to communicate with the flight controller
fm.register(6, fm.fpioa.UART1_RX, force=True)
fm.register(7, fm.fpioa.UART1_TX, force=True)

#### LCD debug switch: on is handy for debugging, off gains a little frame rate
LCD_off = 0

########################################## MAVLink protocol code #################################

# A few MAVLink header bytes
MAV_system_id = 1
Code Example #15
# Edge Impulse - OpenMV Image Classification Example

import sensor, image, time, os, tf
import pyb, ustruct

sensor.reset()                         # Reset and initialize the sensor.
sensor.set_pixformat(sensor.RGB565)    # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.QVGA)      # Set frame size to QVGA (320x240)
sensor.set_windowing((240, 240))       # Set 240x240 window.
sensor.skip_frames(time=2000)          # Let the camera adjust.

net = "trained.tflite"
labels = [line.rstrip('\n') for line in open("labels.txt")]

clock = time.clock()


text = "This is a test!\n"
data = ustruct.pack("<%ds" % len(text), text)


bus = pyb.I2C(2, pyb.I2C.SLAVE, addr=0x12)
bus.deinit()
bus = pyb.I2C(2, pyb.I2C.SLAVE, addr=0x12)
print("Waiting for Arduino...")


while(True):
    clock.tick()

    img = sensor.snapshot()
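    # Continuation (sketch): the original is truncated here. The stock Edge
    # Impulse OpenMV example classifies the frame and pairs labels with scores:
    for obj in tf.classify(net, img, min_scale=1.0, scale_mul=0.8,
                           x_overlap=0.5, y_overlap=0.5):
        predictions_list = list(zip(labels, obj.output()))
        for i in range(len(predictions_list)):
            print("%s = %f" % (predictions_list[i][0], predictions_list[i][1]))
    print(clock.fps())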
Code Example #16
from machine import I2C, Timer, PWM
from board import board_info
from fpioa_manager import fm
import sensor, lcd, os

lcd.init()
lcd.rotation(2)

img_w = 224
img_h = 224

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.set_windowing((img_w, img_h))
sensor.set_vflip(2)
sensor.run(1)

lcd.clear()
lcd.draw_string(100, 96, "MobileNet Demo")
lcd.draw_string(100, 112, "Loading labels...")

#os.remove("/flash/boot.py")

print(os.getcwd())

print(os.listdir("/"))
print(os.listdir("/flash"))
#print(os.listdir("/sd"))
Code Example #17
# QRCode Example
#
# This example shows the power of the OpenMV Cam to detect QR Codes
# without needing lens correction.

import sensor, image, time

sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.VGA)
sensor.set_windowing((240, 240)) # look at center 240x240 pixels of the VGA resolution.
sensor.skip_frames(time = 2000)
sensor.set_auto_gain(False) # must turn this off to prevent image washout...
clock = time.clock()

while(True):
    clock.tick()
    img = sensor.snapshot()
    for code in img.find_qrcodes():
        img.draw_rectangle(code.rect(), color = 127)
        print(code)
    print(clock.fps())
Code Example #18
File: gettag_0429.py  Project: rzwhoom/sjdw_MVL
import sensor, image, time

sensor.set_framesize(sensor.VGA)  # VGA: 640*480; QVGA: 320*240; QQVGA: 160*120
sensor.skip_frames(time=2000)  # delay so the sensor can settle
sensor.set_auto_gain(False)  # disable auto gain
sensor.set_auto_whitebal(False)  # disable auto white balance
clock = time.clock()  # create the FPS clock
i = 0  # counter
area = [(0, 0, 240, 180), (200, 0, 240, 180), (400, 0, 240, 180),
        (0, 150, 240, 180), (200, 150, 240, 180), (400, 150, 240, 180),
        (0, 300, 240, 180), (200, 300, 240, 180), (400, 300, 240, 180)]
# The first pass scans overlapping sub-windows to get the target coordinates and the
# start window of the next scan. The 240*180 sub-window size was found experimentally
# to be the optimum.

while (True):
    flag = 0  # capture flag, starts at 0
    for a in area:  # iterate over all sub-windows
        sensor.set_windowing(a)  # set the scan window
        clock.tick()  # FPS timing, only needed when watching on a laptop
        img = sensor.snapshot()  # grab a frame

        for tag in img.find_apriltags(
                families=image.TAG36H11):  # find AprilTag objects in the frame
            img.draw_rectangle(tag.rect(),
                               color=(255, 0, 0))  # draw the tag region (debug view only)
            img.draw_cross(tag.cx(), tag.cy(),
                           color=(0, 255, 0))  # draw the tag center (debug view only)
            cx = tag.cx() + a[0]  # x in full-frame coordinates
            cy = tag.cy() + a[1]  # y in full-frame coordinates
            taglist = []  # create an empty list
            taglist.append(tag.id())  # append the tag id
            taglist.append(cx)  # append the full-frame x
            taglist.append(cy)  # append the full-frame y
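            # Continuation (sketch): the original is truncated here; presumably
            # the capture flag is set and the tag data is reported, e.g.:
            flag = 1
            print(taglist)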
Code Example #19
# CIFAR-10 Search Just Center Example
#
# CIFAR-10 here refers to a convolutional neural network trained to classify its field of
# view into several different object types; it works on RGB video data.
#
# In this example we slide the detector window over the image and get a list of activations
# where there might be an object. Note that using a CNN with a sliding window is extremely
# computationally expensive, so for an exhaustive search do not expect the CNN to be real-time.

import sensor, image, time, os, nn

sensor.reset()                         # Reset and initialize the sensor.
sensor.set_pixformat(sensor.RGB565)    # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.QVGA)      # Set frame size to QVGA (320x240)
sensor.set_windowing((128, 128))       # Set 128x128 window.
sensor.skip_frames(time=750)           # Don't let autogain run very long.
sensor.set_auto_gain(False)            # Turn off autogain.
sensor.set_auto_exposure(False)        # Turn off auto exposure.

# Load cifar10 network (You can get the network from OpenMV IDE).
net = nn.load('/cifar10.network')
# Faster, smaller and less accurate.
# net = nn.load('/cifar10_fast.network')
labels = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']

clock = time.clock()
while(True):
    clock.tick()

    img = sensor.snapshot()
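    # Continuation (sketch): the original is truncated here. Following the stock
    # nn examples (compare Code Example #27), classify and report the best label:
    out = net.forward(img)
    max_idx = out.index(max(out))
    score = int(((out[max_idx] + 128) / 255) * 100)
    print("%s : %d%%" % (labels[max_idx], score))
    print(clock.fps())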
Code Example #20
import sensor, image, time, pyb
from pyb import UART
import json
green_threshold = (7, 120)
uart = UART(3, 115200, timeout_char=1000000)
ROI = (51, 18, 196, 196)
sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.QVGA)
sensor.set_windowing(ROI)
sensor.skip_frames(10)
sensor.set_auto_gain(True)
sensor.set_auto_whitebal(False)
clock = time.clock()
led = pyb.LED(3)
while (True):
    clock.tick()
    img = sensor.snapshot()
    blobs = img.find_blobs([green_threshold],
                           x_stride=2,
                           y_stride=2,
                           area_threshold=6)
    led.on()
    if blobs:
        for b in blobs:
            img.draw_cross(b[5], b[6])
            print(b[5], b[6])
            uart.writechar(0xFF)
            uart.writechar(0xEE)
            uart.writechar(b[5])
            uart.writechar(b[6])
Code Example #21
import sensor, image, lcd
import KPU as kpu
from fpioa_manager import fm
from board import board_info
from machine import UART

lcd.init()
sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.set_windowing((224, 224))
sensor.set_vflip(1)
sensor.run(1)
fm.register(board_info.PIN15, fm.fpioa.UART1_TX)
fm.register(board_info.PIN17, fm.fpioa.UART1_RX)
uart_A = UART(UART.UART1, 115200, 8, None, 1, timeout=1000, read_buf_len=4096)

classes = ["racoon"]
task = kpu.load(0x600000)
anchor = (0.57273, 0.677385, 1.87446, 2.06253, 3.33843, 5.47434, 7.88282,
          3.52778, 9.77052, 9.16828)
a = kpu.init_yolo2(task, 0.3, 0.3, 5, anchor)
while (True):
    img = sensor.snapshot()
    #.rotation_corr(z_rotation=90.0)
    #a = img.pix_to_ai()
    code = kpu.run_yolo2(task, img)
    if code:
        for i in code:
            a = img.draw_rectangle(i.rect(), color=(0, 255, 0))
            a = img.draw_string(i.x(),
                                i.y(),
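                                # (sketch) plausible completion of the truncated
                                # call, following the stock MaixPy yolo2 examples:
                                classes[i.classid()] + " %.2f" % i.value(),
                                color=(0, 255, 0))
        a = lcd.display(img)
    else:
        a = lcd.display(img)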
Code Example #22
# CMSIS CNN example.
import sensor, image, time, os

sensor.reset()                          # Reset and initialize the sensor.
sensor.set_contrast(3)
sensor.set_pixformat(sensor.RGB565)     # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.QVGA)       # Set frame size to QVGA (320x240)
sensor.set_windowing((200, 200))        # Set 200x200 window.
sensor.skip_frames(time = 100)          # Wait for settings to take effect.
sensor.set_auto_gain(False)
sensor.set_auto_exposure(False)

labels = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']

clock = time.clock()    # Create a clock object to track the FPS.
while(True):
    clock.tick()        # Update the FPS clock.
    img = sensor.snapshot().lens_corr(1.6)  # Take a picture and return the image.
    out = img.classify_object()
    # print label_id:confidence
    #for i in range(0, len(out)):
    #    print("%s:%d "%(labels[i], out[i]), end="")
    max_idx = out.index(max(out))
    print("%s : %0.2f%% "%(labels[max_idx], (out[max_idx]/128)*100))

    #print(clock.fps())             # Note: OpenMV Cam runs about half as fast when connected
                                    # to the IDE. The FPS should increase once disconnected.
Code Example #23
# Single Color RGB565 Blob Tracking Example
#
# This example shows off single color RGB565 tracking using the OpenMV Cam.

import sensor, image, time, math

# Color Tracking Thresholds (L Min, L Max, A Min, A Max, B Min, B Max)
# The below thresholds track in general red/green/blue things. You may wish to tune them...

screen = 400  #450
sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.VGA)
sensor.set_windowing((int((640 - screen) / 2), int(
    (480 - screen) / 2), screen, screen))
sensor.set_auto_exposure(False, 5000)
sensor.set_auto_gain(False)  # must be turned off for color tracking
sensor.set_auto_whitebal(False)  # must be turned off for color tracking
clock = time.clock()

# Only blobs with more pixels than "pixels_threshold" and more area than "area_threshold" are
# returned by "find_blobs" below. Change "pixels_threshold" and "area_threshold" if you change
# the camera resolution. "merge=True" merges all overlapping blobs in the image.

while (True):
    clock.tick()
    img = sensor.snapshot()

    blobs = img.find_blobs([(15, 94, -57, -12, -48, -10)],
                           pixels_threshold=200,
                           area_threshold=200,
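                           # (sketch) plausible completion of the truncated call:
                           merge=True)
    for blob in blobs:
        img.draw_rectangle(blob.rect())
        img.draw_cross(blob.cx(), blob.cy())
    print(clock.fps())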
Code Example #24
# MicroPython v0.5.0-29-g97fad3a on 2020-03-13; Sipeed_M1 with kendryte-k210

# Import libraries
import sensor, image, lcd, time, utime
import KPU as kpu

# Initial setup of the LCD display and the OV2640 camera
lcd.init()  # Initialize the display
sensor.reset()  # Initialize the camera
sensor.set_pixformat(sensor.RGB565)  # Set the image color format
sensor.set_framesize(
    sensor.QVGA)  # Capture images at QVGA (320x240)
sensor.set_windowing(
    (224, 224))  # Window to the image size the network was trained on
sensor.set_vflip(1)  # Vertical image flip
sensor.set_saturation(-3)  # Saturation
sensor.set_brightness(-3)  # Brightness
sensor.set_contrast(-3)  # Contrast
lcd.clear()  # Clear the screen to black

# Model description and loading
labels = ['Acaro', 'Bueno',
          'Manchado']  # Labels of the network's final layer
task = kpu.load(
    '/sd/3clases.kmodel')  # Location of the .kmodel file (LOAD)
kpu.set_outputs(task, 0, 1, 1,
                3)  # Dimensions of the network's final layer

while (True):

    tick1 = utime.ticks_ms()
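    # Continuation (sketch): the original is truncated here. A typical MaixPy
    # classification pass over the three classes, timed with utime:
    img = sensor.snapshot()
    fmap = kpu.forward(task, img)
    plist = fmap[:]
    max_index = plist.index(max(plist))
    tick2 = utime.ticks_ms()
    lcd.display(img)
    lcd.draw_string(0, 0, "%s (%d ms)" % (labels[max_index], utime.ticks_diff(tick2, tick1)))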
Code Example #25
# QRCode Example
#
# This example shows the power of the OpenMV Cam to detect QR Codes
# without needing lens correction.

import sensor, image, time, pyb

green_led = pyb.LED(2)
blue_led = pyb.LED(3)
blue_led.off()
green_led.off()

sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.VGA)
sensor.set_windowing(
    (240, 240))  # look at center 240x240 pixels of the VGA resolution.
sensor.skip_frames(time=2000)
sensor.set_auto_gain(False)  # must turn this off to prevent image washout...

clock = time.clock()

startTime = 0
greenOnTime = 0
blueBlinkTime = 0

green_led_is_on = False

while (True):
    clock.tick()
    img = sensor.snapshot()
    codes = img.find_qrcodes()
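    # Continuation (sketch): the original is truncated here. A plausible use of
    # the LED state variables above: green while a code is in view, off after 1 s.
    if codes:
        for code in codes:
            print(code)
        green_led.on()
        greenOnTime = pyb.millis()
    elif pyb.millis() - greenOnTime > 1000:
        green_led.off()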
Code Example #26
# AprilTags Max Res Example
#
# This example shows the power of the OpenMV Cam to detect April Tags
# on the OpenMV Cam M7. The M4 versions cannot detect April Tags.

import sensor, image, time, math, omv, pyb, ustruct

sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.VGA) # we run out of memory if the resolution is much bigger...
# AprilTags works on images of at most 64K pixels.
if omv.board_type() == "H7": sensor.set_windowing((240, 240))
elif omv.board_type() == "M7": sensor.set_windowing((200, 200))
else: raise Exception("You need a more powerful OpenMV Cam to run this script")
sensor.skip_frames(time = 2000)
sensor.set_auto_gain(False)  # must turn this off to prevent image washout...
sensor.set_auto_whitebal(False)  # must turn this off to prevent image washout...
clock = time.clock()

text = '...\n'
data = ustruct.pack("<%ds" % len(text), text)
bus = pyb.I2C(2, pyb.I2C.SLAVE, addr=0x12)
bus.deinit()
bus = pyb.I2C(2, pyb.I2C.SLAVE, addr=0x12)
print("Waiting for Arduino...")

# Note! Unlike find_qrcodes the find_apriltags method does not need lens correction on the image to work.

# The apriltag code supports up to 6 tag families which can be processed at the same time.
# Returned tag objects will have their tag family and id within the tag family.
Code Example #27
File: nn_cifar10.py  Project: tiangaomingjing/openmv
# CIFAR10 Example
import sensor, image, time, os, nn

sensor.reset()  # Reset and initialize the sensor.
sensor.set_contrast(3)
sensor.set_pixformat(sensor.RGB565)  # Set pixel format to RGB565
sensor.set_framesize(sensor.QVGA)  # Set frame size to QVGA (320x240)
sensor.set_windowing((128, 128))  # Set 128x128 window.
sensor.skip_frames(time=1000)
sensor.set_auto_gain(False)
sensor.set_auto_exposure(False)

# Load cifar10 network
net = nn.load('/cifar10.network')
# Faster, smaller and less accurate.
#net = nn.load('/cifar10_fast.network')
labels = [
    'airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse',
    'ship', 'truck'
]

clock = time.clock()  # Create a clock object to track the FPS.
while (True):
    clock.tick()  # Update the FPS clock.
    img = sensor.snapshot()  # Take a picture and return the image.
    out = net.forward(img)
    max_idx = out.index(max(out))
    score = int(((out[max_idx] + 128) / 255) * 100)
    if (score < 70):
        score_str = "??:??%"
    else:
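        # (sketch) completion of the truncated branch, following the stock nn example:
        score_str = "%s:%d%% " % (labels[max_idx], score)
    print(score_str)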
Code Example #28
# This example shows off using the keypoints feature of your OpenMV Cam to track
# a face after it has been detected by a Haar Cascade. The first part of this
# script finds a face in the image using the frontalface Haar Cascade.
# After which the script uses the keypoints feature to automatically learn your
# face and track it. Keypoints can be used to automatically track anything.
import sensor, time, image, pyb

RED_LED_PIN = 1
BLUE_LED_PIN = 3

# Reset sensor
sensor.reset()
sensor.set_contrast(3)
sensor.set_gainceiling(16)
sensor.set_framesize(sensor.VGA)
sensor.set_windowing((320, 240))
sensor.set_pixformat(sensor.GRAYSCALE)

# Skip a few frames to allow the sensor settle down
sensor.skip_frames(time=2000)

# Load Haar Cascade
# By default this will use all stages, lower stages is faster but less accurate.
face_cascade = image.HaarCascade("frontalface", stages=25)
print(face_cascade)

# First set of keypoints
kpts1 = None

while (True):
Code Example #29
File: donkey_init.py  Project: aakoch/robocar
device = pyb.UART(3, 19200, timeout_char = 1000)

# throttle [0:100] (101 values) -> [THROTTLE_SERVO_MIN_US, THROTTLE_SERVO_MAX_US]
# steering [0:180] (181 values) -> [STEERING_SERVO_MIN_US, STEERING_SERVO_MAX_US]
def set_servos(throttle, steering):
    throttle = THROTTLE_SERVO_MIN_US + ((throttle * (THROTTLE_SERVO_MAX_US - THROTTLE_SERVO_MIN_US + 1)) / 101)
    steering = STEERING_SERVO_MIN_US + ((steering * (STEERING_SERVO_MAX_US - STEERING_SERVO_MIN_US + 1)) / 181)
    device.write("{%05d,%05d}\r\n" % (throttle, steering))

# Camera Control Code
sensor.reset()
sensor.set_pixformat(sensor.RGB565 if COLOR_LINE_FOLLOWING else sensor.GRAYSCALE)
sensor.set_framesize(FRAME_SIZE)
sensor.set_vflip(True)
sensor.set_hmirror(True)
sensor.set_windowing((int((sensor.width() / 2) - ((sensor.width() / 2) * FRAME_WIDE)), int(sensor.height() * (1.0 - FRAME_REGION)), \
                     int((sensor.width() / 2) + ((sensor.width() / 2) * FRAME_WIDE)), int(sensor.height() * FRAME_REGION) - BOTTOM_PX_TO_REMOVE))
sensor.skip_frames(time = 200)
if COLOR_LINE_FOLLOWING: sensor.set_auto_gain(False)
if COLOR_LINE_FOLLOWING: sensor.set_auto_whitebal(False)
clock = time.clock()

old_time = pyb.millis()

throttle_old_result = None
throttle_i_output = 0
throttle_output = THROTTLE_OFFSET

steering_old_result = None
steering_i_output = 0
steering_output = STEERING_OFFSET
Code Example #30
File: faceVGA.py  Project: saimaza123/smartdevice
        break
    except:
        err_counter = err_counter + 1
        if err_counter == 20:
            print("Error: Sensor Init Failed")
            #lcd.draw_string(lcd.width()//2-100,lcd.height()//2-4, "Error: Sensor Init Failed", lcd.WHITE, lcd.RED)
        time.sleep(0.1)
        continue
gc.collect()
sensor.set_pixformat(sensor.RGB565)
#sensor.set_pixformat(sensor.GRAYSCALE)
#sensor.set_pixformat(sensor.BAYER)
sensor.set_framesize(sensor.VGA)  #QVGA=320x240
#sensor.set_framesize(sensor.VGA)
img_win = (80, 60, 480, 360)
sensor.set_windowing(img_win)
if Sensor_img_flip:
    sensor.set_hmirror(1)  #for unit V  #bugs??
    sensor.set_vflip(1)  #for unit V
img_w = sensor.width()
img_h = sensor.height()
sensor_ID = sensor.get_id()
if (sensor_ID == 30530):
    sensor_ID_str = 'OV7740'
else:
    sensor_ID_str = str(sensor_ID)  # unknown sensor: fall back to the raw ID
print("image sensor is " + str(sensor_ID_str) + ", with size " + str(img_w) +
      " x " + str(img_h))
sensor.skip_frames(time=600)
#sensor.run(1)
#https://maixpy.sipeed.com/zh/libs/Maix/kpu.html
gc.collect()
print("GC free mem before yolo2 load: " + str(gc.mem_free() / 1000) + "KB")
Code Example #31
import sensor, lcd
import KPU as kpu
import ulab as np
import sys


lcd.init(freq=40000000)
lcd.rotation(2)  # Rotate the lcd 180deg
sensor.reset(dual_buff=True)

sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.B64X64)
window_width = 32
window_height = 32
classes = 5
Scale = 2
sensor.set_windowing((window_height, window_width))
sensor.skip_frames(100)
sensor.run(1)

print("init kpu")
lcd.draw_string(10, 10, "init kpu")
lcd.draw_string(170, 10, "Running")

lcd.draw_string(10, 30, "load kmodel")
kpu.memtest()
task = kpu.load(0x500000)
lcd.draw_string(170, 30, "Done")

lcd.draw_string(10, 50, "set outputs")
fmap = kpu.set_outputs(task, 0, window_height, window_width, classes)
kpu.memtest()
Code Example #32
File: ALODeCK_2.1.py  Project: rhoadesScholar/ALODeCK
              (15, 100, 15, 127, -128, -30), # generic_blue_thresholds -> index is 2 so code == (1 << 2)
              (20, 100, -54, -1, 7, 53)] # generic_IR_thresholds -> index is 3 so code == (1 << 3)
radius = 40
windowX = 240
windowY = 240
buttColor = 4
headColor = 1
fps = 120

#SETUP
sensor.reset()
sensor.set_hmirror(True)
sensor.set_vflip(True)
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.set_windowing((windowX, windowY)) # 240x240 center pixels of QVGA
sensor.set_auto_gain(False, gain_db = 20) # must be turned off for color tracking
sensor.set_gainceiling(128)
sensor.set_auto_whitebal(False, rgb_gain_db = (-6.0, -3.0, 2)) # must be turned off for color tracking
#sensor.set_brightness(-1)
sensor.set_saturation(3)
#sensor.set_quality(100)
#sensor.set_auto_exposure(False, 1000)
#sensor.set_contrast(3)
sensor.skip_frames(time = 2000)
clock = time.clock()

kernel_size = 1 # 3x3==1, 5x5==2, 7x7==3, etc.
kernel = [-2, -1,  0, \
          -1,  6,  -1, \
           0,  -1,  -2]
Code Example #33
# Face Tracking Example
#
# This example shows off using the keypoints feature of your OpenMV Cam to track
# a face after it has been detected by a Haar Cascade. The first part of this
# script finds a face in the image using the frontalface Haar Cascade.
# After which the script uses the keypoints feature to automatically learn your
# face and track it. Keypoints can be used to automatically track anything.
import sensor, time, image

# Reset sensor
sensor.reset()
sensor.set_contrast(3)
sensor.set_gainceiling(16)
sensor.set_framesize(sensor.VGA)
sensor.set_windowing((320, 240))
sensor.set_pixformat(sensor.GRAYSCALE)

# Skip a few frames to allow the sensor settle down
sensor.skip_frames(time = 2000)

# Load Haar Cascade
# By default this will use all stages, lower stages is faster but less accurate.
face_cascade = image.HaarCascade("frontalface", stages=25)
print(face_cascade)

# First set of keypoints
kpts1 = None

# Find a face!
while (kpts1 is None):
    img = sensor.snapshot()
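    # Continuation (sketch): the original is truncated here. The stock example
    # detects a face, then learns keypoints inside an expanded face ROI:
    img.draw_string(0, 0, "Looking for a face...")
    objects = img.find_features(face_cascade, threshold=0.5, scale_factor=1.25)
    if objects:
        # expand the ROI a little so the keypoints cover the whole face
        face = (objects[0][0] - 31, objects[0][1] - 31,
                objects[0][2] + 62, objects[0][3] + 62)
        kpts1 = img.find_keypoints(max_keypoints=150, threshold=10, roi=face)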
Code Example #34
# AprilTags Example
#
# This example shows the power of the OpenMV Cam to detect April Tags
# on the OpenMV Cam M7. The M4 versions cannot detect April Tags.

import sensor, image, time, math

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.VGA) # we run out of memory if the resolution is much bigger...
sensor.set_windowing((160, 120)) # Look at center 160x120 pixels of the VGA resolution.
sensor.skip_frames(time = 2000)
sensor.set_auto_gain(False)  # must turn this off to prevent image washout...
sensor.set_auto_whitebal(False)  # must turn this off to prevent image washout...
clock = time.clock()

# Note! Unlike find_qrcodes the find_apriltags method does not need lens correction on the image to work.

# What's the difference between tag families? Well, for example, the TAG16H5 family is effectively
# a 4x4 square tag. So, this means it can be seen at a longer distance than a TAG36H11 tag which
# is a 6x6 square tag. However, the lower H value (H5 versus H11) means that the false positive
# rate for the 4x4 tag is much, much, much higher than for the 6x6 tag. So, unless you have a
# reason to use the other tag families, just use TAG36H11, which is the default family.

while(True):
    clock.tick()
    img = sensor.snapshot()
    for tag in img.find_apriltags(): # defaults to TAG36H11
        img.draw_rectangle(tag.rect(), color = (255, 0, 0))
        img.draw_cross(tag.cx(), tag.cy(), color = (0, 255, 0))
        print_args = (tag.id(), (180 * tag.rotation()) / math.pi)
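        # (sketch) completion of the truncated loop, following the stock example:
        print("Tag ID %d, rotation %f (degrees)" % print_args)
    print(clock.fps())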
Code Example #35
# LeNet Example
import sensor, image, time

sensor.reset()                      # Reset and initialize the sensor.
sensor.set_contrast(3)
sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE
sensor.set_framesize(sensor.QVGA)   # Set frame size to QVGA (320x240)
sensor.set_windowing((28, 28))

sensor.skip_frames(time = 2000)     # Wait for settings to take effect.
sensor.set_auto_gain(False)
sensor.set_auto_exposure(False)

clock = time.clock()                # Create a clock object to track the FPS.

while(True):
    clock.tick()                    # Update the FPS clock.
    img = sensor.snapshot()         # Take a picture and return the image.
    out = img.invert().find_number()
    if out[1] > 3.0:
        print(out[0])
    #print(clock.fps())             # Note: OpenMV Cam runs about half as fast when connected
                                    # to the IDE. The FPS should increase once disconnected.
Code Example #36
import sensor, lcd
from pyb import LED, Pin
blue_led = LED(3)
switch = Pin('P4', Pin.IN, Pin.PULL_NONE)
led = Pin('P9', Pin.OUT_PP, Pin.PULL_NONE)

while (True):

    if (switch.value()):
        led.high()

        sensor.reset()
        sensor.set_pixformat(sensor.RGB565)
        sensor.set_framesize(sensor.VGA)

        #lcd.set_backlight(True)
        sensor.set_windowing((200, 260))  # 2x Zoom
        sensor.skip_frames(time=2000)

        sensor.set_auto_gain(
            False)  # must turn this off to prevent image washout...
        sensor.set_auto_whitebal(
            False)  # must turn this off to prevent image washout...

        #clock = time.clock()
        lcd.init()
        while (switch.value()):
            #clock.tick()
            img = sensor.snapshot()

            matrices = img.find_datamatrices()
            for matrix in matrices:
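                # (sketch) completion of the truncated loop, mirroring Code Example #6:
                img.draw_rectangle(matrix.rect(), color = (255, 0, 0))
                print("Payload \"%s\"" % matrix.payload())
            lcd.display(img)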
Code Example #37
File: iris_detection.py  Project: michaelchi08/openmv
# Note: This script does not detect a face first, use it with the telephoto lens.

import sensor, time, image

# Reset sensor
sensor.reset()

# Sensor settings
sensor.set_contrast(3)
sensor.set_gainceiling(16)

# Set resolution to VGA.
sensor.set_framesize(sensor.VGA)

# Bin/Crop image to 200x100, which gives more details with less data to process
sensor.set_windowing((220, 190, 200, 100))

sensor.set_pixformat(sensor.GRAYSCALE)

# Load Haar Cascade
# By default this will use all stages, lower stages is faster but less accurate.
eyes_cascade = image.HaarCascade("eye", stages=24)
print(eyes_cascade)

# FPS clock
clock = time.clock()

while (True):
    clock.tick()
    # Capture snapshot
    img = sensor.snapshot()
Code Example #38
import sensor, time
from pyb import UART

deg = 0
r = 0
x = 0
y = 0
uart = UART(1, 115200)
UARTRecieve = 0
bytesToSend = 0

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.skip_frames(time=2000)
sensor.set_auto_gain(False, gain_db=14)
sensor.set_auto_whitebal(False, rgb_gain_db=(-8, -5.8, -1.5))
sensor.set_auto_exposure(False, 25000)
sensor.set_windowing((cropX, 0, cropSize, cropSize))
sensor.set_hmirror(True)
clock = time.clock()


def sortStuff(stuff):
    return stuff[4]


while (True):
    #print(sensor.get_rgb_gain_db())
    #print(clock.fps())
    clock.tick()
    img = sensor.snapshot()
    blobs = []
    blobs2 = [[], [], []]
Code Example #39
# IR Beacon Grayscale Tracking Example
#
# This example shows off IR beacon Grayscale tracking using the OpenMV Cam.

import sensor, image, time

thresholds = (255, 255) # thresholds for bright white light from IR.

sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.VGA)
sensor.set_windowing((240, 240)) # 240x240 center pixels of VGA
sensor.skip_frames(time = 2000)
sensor.set_auto_gain(False) # must be turned off for color tracking
sensor.set_auto_whitebal(False) # must be turned off for color tracking
clock = time.clock()

# Only blobs with more pixels than "pixels_threshold" and more area than "area_threshold" are
# returned by "find_blobs" below. Change "pixels_threshold" and "area_threshold" if you change
# the camera resolution. "merge=True" merges all overlapping blobs in the image.

while(True):
    clock.tick()
    img = sensor.snapshot()
    for blob in img.find_blobs([thresholds], pixels_threshold=200, area_threshold=200, merge=True):
        ratio = blob.w() / blob.h()
        if (ratio >= 0.5) and (ratio <= 1.5): # filter out non-squarish blobs
            img.draw_rectangle(blob.rect())
            img.draw_cross(blob.cx(), blob.cy())
    print(clock.fps())
Code Example #40
# Barcode Example
#
# This example shows off how easy it is to detect bar codes using the
# OpenMV Cam M7. Barcode detection does not work on the M4 Camera.

import sensor, image, time, math

sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.VGA) # High Res!
sensor.set_windowing((640, 80)) # V Res of 80 == less work (40 for 2X the speed).
sensor.skip_frames(time = 2000)
sensor.set_auto_gain(False)  # must turn this off to prevent image washout...
sensor.set_auto_whitebal(False)  # must turn this off to prevent image washout...
clock = time.clock()

# Barcode detection can run at the full 640x480 resolution of your OpenMV Cam's
# OV7725 camera module. Barcode detection will also work in RGB565 mode but at
# a lower resolution. That said, barcode detection requires a higher resolution
# to work well so it should always be run at 640x480 in grayscale...

def barcode_name(code):
    if(code.type() == image.EAN2):
        return "EAN2"
    if(code.type() == image.EAN5):
        return "EAN5"
    if(code.type() == image.EAN8):
        return "EAN8"
    if(code.type() == image.UPCE):
        return "UPCE"
    if(code.type() == image.ISBN10):
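        return "ISBN10"
    # (sketch) the original is truncated here; the stock example continues through
    # the remaining types supported by find_barcodes():
    if(code.type() == image.ISBN13):
        return "ISBN13"
    if(code.type() == image.EAN13):
        return "EAN13"
    if(code.type() == image.CODE128):
        return "CODE128"
    return "UNKNOWN"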