def test_color_bars():
    """Verify the sensor's built-in color-bar test pattern.

    Enables colorbar mode, lets the sensor settle, then samples 10 pixels
    from the center row of each of the 8 vertical bars in a QVGA RGB565
    frame and checks the averaged RGB values against per-bar thresholds.

    Raises:
        Exception: if any bar's averaged color falls outside its threshold.
    """
    sensor.reset()
    # Set sensor settings
    sensor.set_brightness(0)
    sensor.set_saturation(0)
    sensor.set_gainceiling(8)
    sensor.set_contrast(2)
    # Set sensor pixel format
    sensor.set_framesize(sensor.QVGA)
    sensor.set_pixformat(sensor.RGB565)
    # Enable colorbar test mode
    sensor.set_colorbar(True)
    # Skip a few frames to allow the sensor to settle down.
    # Note: This takes more time when exec from the IDE.
    for _ in range(100):
        image = sensor.snapshot()
    # Color bars thresholds, ordered left to right in the test pattern.
    t = [lambda r, g, b: r < 50 and g < 50 and b < 50,     # Black
         lambda r, g, b: r < 50 and g < 50 and b > 200,    # Blue
         lambda r, g, b: r > 200 and g < 50 and b < 50,    # Red
         lambda r, g, b: r > 200 and g < 50 and b > 200,   # Purple
         lambda r, g, b: r < 50 and g > 200 and b < 50,    # Green
         lambda r, g, b: r < 50 and g > 200 and b > 200,   # Aqua
         lambda r, g, b: r > 200 and g > 200 and b < 50,   # Yellow
         lambda r, g, b: r > 200 and g > 200 and b > 200]  # White
    # 320x240 image with 8 color bars, each approx 40 pixels wide. We start
    # from the center row of the frame buffer and average the values of 10
    # sample pixels from the center of each color bar.
    for i in range(8):
        avg = (0, 0, 0)
        idx = 40 * i + 20  # horizontal center of bar i
        for off in range(10):  # average 10 pixels
            rgb = image.get_pixel(idx + off, 120)
            avg = tuple(map(sum, zip(avg, rgb)))
        # FIX: integer-average each channel once instead of feeding float
        # division results into "%d" three separate times.
        r, g, b = avg[0] // 10, avg[1] // 10, avg[2] // 10
        if not t[i](r, g, b):
            raise Exception("COLOR BARS TEST FAILED. "
                            "BAR#(%d): RGB(%d,%d,%d)" % (i + 1, r, g, b))
    print("COLOR BARS TEST PASSED...")
def unittest(data_path, temp_path):
    """Draw reference shapes on a blank grayscale frame and diff against a
    golden image; return True when the frames match exactly."""
    import sensor
    sensor.reset()
    sensor.set_framesize(sensor.QVGA)
    sensor.set_pixformat(sensor.GRAYSCALE)
    img = sensor.snapshot().clear()
    cx = img.width() // 2
    cy = img.height() // 2
    img.set_pixel(cx + 50, 120, 255)
    img.set_pixel(cx - 50, 120, 255)
    img.draw_line([cx - 50, 50, cx + 50, 50])
    img.draw_rectangle([cx - 25, cy - 25, 50, 50])
    img.draw_circle(cx, cy, 40)
    img.draw_string(11, 10, "HelloWorld!")
    img.draw_cross(cx, cy)
    sensor.flush()
    # Subtract the golden image; a perfect match leaves an all-zero frame.
    img.difference(data_path + "/drawing.pgm")
    stats = img.get_statistics()
    return (stats.max() == 0) and (stats.min() == 0)
err_y = 0 #计数器 timercnt = 0 timerflag = 1 #发送数据 uart_buf = bytearray([ 0x55, 0xAA, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xAA ]) #串口三配置 uart = UART(3, 115200) uart.init(115200, bits=8, parity=None, stop=1) sensor.reset() sensor.set_pixformat(sensor.GRAYSCALE) #设置灰度信息 sensor.set_framesize(sensor.QQVGA) #设置图像大小 sensor.skip_frames(20) #相机自检几张图片 sensor.set_auto_whitebal(False) #关闭白平衡 clock = time.clock() #打开时钟 while (True): clock.tick() img = sensor.snapshot() #寻找blob blobs = img.find_blobs(black_threshold) if blobs: most_pixels = 0 largest_blob = 0 for i in range(len(blobs)): #目标区域找到的颜色块可能不止一个,找到最大的一个 if blobs[i].pixels() > most_pixels: most_pixels = blobs[i].pixels()
lightsoff = False

def turnofflights():
    """Turn off all four on-board LEDs and record that they are off."""
    # BUG FIX: without `global`, the assignment below created a dead local
    # and the module-level flag never changed.
    global lightsoff
    LED(1).off()
    LED(2).off()
    LED(3).off()
    LED(4).off()
    lightsoff = True

sensor.reset()                       # Reset and initialize the sensor.
sensor.set_pixformat(sensor.RGB565)  # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.QVGA)    # Set frame size to QVGA (320x240)
sensor.skip_frames(time=2000)        # Wait for settings to take effect.
clock = time.clock()                 # Create a clock object to track the FPS.

while(True):
    clock.tick()                     # Update the FPS clock.
    img = sensor.snapshot()          # Take a picture and return the image.
    stats = img.get_statistics()
    print(stats)
    # FIX: don't shadow the builtin `max`; use the max lightness to decide.
    l_max = stats.l_max()
    print(l_max)
    if l_max < 20:
        turnonlights()   # NOTE(review): defined outside this chunk — confirm
    elif l_max > 90:
        turnofflights()
import sensor, image, time # NOTE!!! You have to use a small power of 2 resolution when using # find_displacement(). This is because the algorithm is powered by # something called phase correlation which does the image comparison # using FFTs. A non-power of 2 resolution requires padding to a power # of 2 which reduces the usefulness of the algorithm results. Please # use a resolution like B128X128 or B128X64 (2x faster). # Your OpenMV Cam supports power of 2 resolutions of 64x32, 64x64, # 128x64, and 128x128. If you want a resolution of 32x32 you can create # it by doing "img.pool(2, 2)" on a 64x64 image. sensor.reset() # Reset and initialize the sensor. sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE (or RGB565) sensor.set_framesize(sensor.B128X128) # Set frame size to 128x128... (or 128x64)... sensor.skip_frames(time = 2000) # Wait for settings take effect. clock = time.clock() # Create a clock object to track the FPS. # Take from the main frame buffer's RAM to allocate a second frame buffer. # There's a lot more RAM in the frame buffer than in the MicroPython heap. # However, after doing this you have a lot less RAM for some algorithms... # So, be aware that it's a lot easier to get out of RAM issues now. extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.GRAYSCALE) extra_fb.replace(sensor.snapshot()) while(True): clock.tick() # Track elapsed milliseconds between snapshots(). img = sensor.snapshot() # Take a picture and return the image. for y in range(0, sensor.height(), BLOCK_H):
# Motor direction pins; the Pin objects are created outside this chunk.
pinADir0.value(1)
pinADir1.value(0)
pinBDir0.value(0)
pinBDir1.value(1)

# PWM on timer 4 at 1 kHz; both motor channels start at 0% duty.
tim = Timer(4, freq=1000)  # Frequency in Hz
ch1 = tim.channel(1, Timer.PWM, pin=Pin("P7"), pulse_width_percent=0)
ch2 = tim.channel(2, Timer.PWM, pin=Pin("P8"), pulse_width_percent=0)

### Init: Camera
clock = time.clock()  # Tracks FPS.
start = pyb.millis()
sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.QQQVGA)  # 80x60: recommended resolution for the processing we do
sensor.set_vflip(True)
sensor.set_hmirror(True)
sensor.skip_frames(time=3000)

### Init: Recording file
fname1 = "gs_" + str(pyb.millis()) + ".mjpeg"
#m = mjpeg.Mjpeg(fname1)

### Init: Binary mask
start = pyb.millis()  # NOTE(review): overwrites the earlier start timestamp
deflection_angle = 0
a = 0.5 * sensor.width()
lane_center = a  # lane center starts at the image midline
# NOTE(review): chunk truncated — record_time and the loop live beyond this view.
#while(pyb.elapsed_millis(start) < record_time):
# High FPS Example
#
# Shows how to drive the global shutter camera at a very high frame rate.
# At 320x240 or below the camera reads out pixels 2x faster; at 160x120 or
# below, 4x faster — pixel binning kicks in automatically at those sizes.
# The camera must still expose each frame for the requested time, though,
# so the exposure cap below is what actually unlocks the readout speed.
# The resulting image is dark: you need A LOT of lighting for high FPS.

import sensor, image, time

sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.QQVGA)  # 160x120 - make smaller to go faster
sensor.skip_frames(time=2000)       # Wait for settings to take effect.
clock = time.clock()

# Cap the exposure so binning's faster readout shows up as FPS.
sensor.set_auto_exposure(True, exposure_us=5000)  # make smaller to go faster

while True:
    clock.tick()
    frame = sensor.snapshot()
    # FPS roughly doubles once disconnected from the IDE.
    print(clock.fps())
# Cartoon Filter
#
# Joins similar pixel areas of an image and replaces the pixels in those
# areas with the area mean, producing a cartoon-like look.

import sensor, image, time

sensor.reset()
sensor.set_pixformat(sensor.RGB565)  # or GRAYSCALE...
sensor.set_framesize(sensor.QVGA)    # or QQVGA...
sensor.skip_frames(time=2000)
clock = time.clock()

while True:
    clock.tick()
    # seed_threshold caps how far a colored region may grow; raising it
    # merges more pixels. floating_thresholds caps the pixel-to-pixel
    # difference while growing a region; keep it small or the whole image
    # quickly merges into one region. cartoon() grows regions while both
    # thresholds are satisfied.
    frame = sensor.snapshot()
    img = frame.cartoon(seed_threshold=0.05, floating_thresholds=0.05)
    print(clock.fps())
# Example 1 - LCD Shield Demo
#
# Note: requires the LCD Shield for your OpenMV Cam.
# Mirrors the camera's frame buffer onto the shield so you can view it on the go.

import sensor, image, lcd

sensor.reset()                       # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565)  # or sensor.GRAYSCALE
sensor.set_framesize(sensor.QQVGA2)  # Special 128x160 framesize for LCD Shield.
lcd.init()                           # Initialize the lcd screen.

while True:
    frame = sensor.snapshot()        # Take a picture...
    lcd.display(frame)               # ...and show it on the shield.
# Set the target temp range here min_temp_in_celsius = 20.0 max_temp_in_celsius = 35.0 print("Resetting Lepton...") # These settings are applied on reset sensor.reset() sensor.ioctl(sensor.IOCTL_LEPTON_SET_MEASUREMENT_MODE, True) sensor.ioctl(sensor.IOCTL_LEPTON_SET_MEASUREMENT_RANGE, min_temp_in_celsius, max_temp_in_celsius) print("Lepton Res (%dx%d)" % (sensor.ioctl(sensor.IOCTL_LEPTON_GET_WIDTH), sensor.ioctl(sensor.IOCTL_LEPTON_GET_HEIGHT))) print("Radiometry Available: " + ("Yes" if sensor.ioctl(sensor.IOCTL_LEPTON_GET_RADIOMETRY) else "No")) sensor.set_pixformat(sensor.GRAYSCALE) sensor.set_framesize(sensor.LCD) sensor.skip_frames(time=5000) clock = time.clock() lcd.init() # Only blobs that with more pixels than "pixel_threshold" and more area than "area_threshold" are # returned by "find_blobs" below. Change "pixels_threshold" and "area_threshold" if you change the # camera resolution. "merge=True" merges all overlapping blobs in the image. def map_g_to_temp(g): return ((g * (max_temp_in_celsius - min_temp_in_celsius)) / 255.0) + min_temp_in_celsius while(True): clock.tick() img = sensor.snapshot() blob_stats = []
def def_camInit():
    """Initialize the camera for QVGA RGB565 capture and return a first frame.

    Returns:
        image.Image: the first snapshot taken after the settings settle.
    """
    sensor.reset()
    sensor.set_pixformat(sensor.RGB565)
    sensor.set_framesize(sensor.QVGA)
    sensor.skip_frames(time=2000)
    # FIX: the snapshot was previously taken and silently discarded; return
    # it so callers can use the warmed-up first frame (backward-compatible —
    # callers ignoring the return value are unaffected).
    img = sensor.snapshot()
    return img
def create_time_based_filename():
    # Build "YYYYMMDDHHMMSS" from the RTC tuple; index 3 (weekday) is skipped.
    datetime = rtc.datetime()
    return "{0}{1:02d}{2:02d}{3:02d}{4:02d}{5:02d}".format(datetime[0], datetime[1], datetime[2],
                                                           datetime[4], datetime[5], datetime[6])

def set_servos(throttle):
    # Send throttle plus the fixed steering-zero pulse width over the UART.
    device.write("{%05d,%05d}\r\n" % (throttle, STEERING_SERVO_ZERO_US))

#leds_on(None)
rtc = RTC()
sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.B128X128)
#sensor.set_windowing((20, 0, 150, 50))
sensor.set_hmirror(True)
sensor.set_vflip(True)
sensor.skip_frames(time=2000)
sensor.set_auto_whitebal(False)
sensor.set_auto_gain(False)
device = pyb.UART(3, 19200, timeout_char=100)
extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.GRAYSCALE)
#extra_fb = sensor.alloc_extra_fb(150, 50, sensor.GRAYSCALE)
# Previously saved minimum throttle; ConfigFile and STEERING_SERVO_ZERO_US
# are defined outside this chunk.
prev_throttle = int(ConfigFile().get_property("min_speed"))
print("Previous min throttle=", str(prev_throttle))
print("Taking pic...")
# LCD 示例程序 # # 提示: 这个实验需要外接一个LCD模块. # # 本示例实现了LCD实时显示摄像头帧缓冲画面. # #翻译和注释:01Studio import sensor, image, lcd #摄像头初始化 sensor.reset() # 初始化摄像头. sensor.set_pixformat(sensor.RGB565) # 或者 sensor.GRAYSCALE sensor.set_framesize(sensor.QQVGA2) # LCD的分辨率为 128x160 . #LCD初始化 lcd.init() while (True): lcd.display(sensor.snapshot()) # 拍照和显示图像.
def set_framesize(self, framesize):
    """Apply a new sensor frame size and mark that settle frames must be skipped again."""
    sensor.set_framesize(framesize)
    # Changing resolution invalidates any previously skipped settle frames.
    self.skipped_frames = False
# finding Apriltags in the image using blob tracking to find the # area of where the tag is first and then calling find_apriltags # on that blob. # Note, this script works well assuming most parts of the image do not # pass the thresholding test... otherwise, you don't get a distance # benefit. import sensor, image, time, math, omv # Set the thresholds to find a white object (i.e. tag border) thresholds = (150, 255) sensor.reset() sensor.set_pixformat(sensor.GRAYSCALE) if omv.board_type() == "H7": sensor.set_framesize(sensor.VGA) elif omv.board_type() == "M7": sensor.set_framesize(sensor.QVGA) else: raise Exception("You need a more powerful OpenMV Cam to run this script") sensor.skip_frames( time=200) # increase this to let the auto methods run for longer sensor.set_auto_gain(False) # must be turned off for color tracking sensor.set_auto_whitebal(False) # must be turned off for color tracking clock = time.clock() # The apriltag code supports up to 6 tag families which can be processed at the same time. # Returned tag objects will have their tag family and id within the tag family. tag_families = 0 tag_families |= image.TAG16H5 # comment out to disable this family tag_families |= image.TAG25H7 # comment out to disable this family tag_families |= image.TAG25H9 # comment out to disable this family tag_families |= image.TAG36H10 # comment out to disable this family
import sensor, image, network, usocket, fir

SSID = ''    # Network SSID
KEY = ''     # Network key
HOST = ''    # Use first available interface
PORT = 8000  # Arbitrary non-privileged port

# Reset sensor
sensor.reset()
# Set sensor settings
sensor.set_contrast(1)
sensor.set_brightness(1)
sensor.set_saturation(1)
sensor.set_gainceiling(16)
sensor.set_framesize(sensor.QQVGA)
sensor.set_pixformat(sensor.RGB565)

# Initialize the thermal sensor
fir.init()

# Init wlan module and connect to network
print("Trying to connect... (may take a while)...")
wlan = network.WINC()
wlan.connect(SSID, key=KEY, security=wlan.WPA_PSK)

# We should have a valid IP now via DHCP
print(wlan.ifconfig())

# Create server socket.
# NOTE(review): chunk truncated — bind/listen/accept continue beyond this view.
s = usocket.socket(usocket.AF_INET, usocket.SOCK_STREAM)
def person_detection(data):
    """Classify a fresh QVGA RGB565 snapshot and return the label as bytes.

    The `data` argument is accepted for interface compatibility but unused.
    """
    sensor.set_pixformat(sensor.RGB565)
    sensor.set_framesize(sensor.QVGA)
    scores = tf.classify("person_detection", sensor.snapshot())[0].output()
    labels = ['unsure', 'person', 'no_person']
    best = scores.index(max(scores))
    return labels[best].encode()
# # We're using the robust=True argument for get_regression() in this script which # computes the linear regression using a much more robust algorithm... but potentially # much slower. The robust algorithm runs in O(N^2) time on the image. So, YOU NEED # TO LIMIT THE NUMBER OF PIXELS the robust algorithm works on or it can actually # take seconds for the algorithm to give you a result... THRESHOLD VERY CAREFULLY! THRESHOLD = (0, 100) # Grayscale threshold for dark things... BINARY_VISIBLE = True # Does binary first so you can see what the linear regression # is being run on... might lower FPS though. import sensor, image, time sensor.reset() sensor.set_pixformat(sensor.GRAYSCALE) sensor.set_framesize(sensor.QQQVGA) # 80x60 (4,800 pixels) - O(N^2) max = 2,3040,000. sensor.skip_frames(time = 2000) # WARNING: If you use QQVGA it may take seconds clock = time.clock() # to process a frame sometimes. while(True): clock.tick() img = sensor.snapshot().binary([THRESHOLD]) if BINARY_VISIBLE else sensor.snapshot() # Returns a line object similar to line objects returned by find_lines() and # find_line_segments(). You have x1(), y1(), x2(), y2(), length(), # theta() (rotation in degrees), rho(), and magnitude(). # # magnitude() represents how well the linear regression worked. It means something # different for the robust linear regression. In general, the larger the value the # better... line = img.get_regression([(255,255) if BINARY_VISIBLE else THRESHOLD], robust = True)
def jpeg_snapshot(data):
    """Capture a QVGA RGB565 frame and return it as JPEG bytes (quality 90).

    The `data` argument is accepted for interface compatibility but unused.
    """
    sensor.set_pixformat(sensor.RGB565)
    sensor.set_framesize(sensor.QVGA)
    frame = sensor.snapshot()
    return frame.compress(quality=90).bytearray()
# Edge Detection Example:
#
# Convolves the image with a high-pass kernel, thresholds the response,
# then erodes away weakly-connected pixels to clean up the edges.

import sensor, image

kernel_size = 1  # kernel width = (size*2)+1, kernel height = (size*2)+1
# High-pass filter kernel. See here for more kernels:
# http://www.fmwconcepts.com/imagemagick/digital_image_filtering.pdf
kernel = [-1, -1, -1,
          -1, +8, -1,
          -1, -1, -1]
thresholds = [(100, 255)]  # grayscale thresholds

sensor.reset()
sensor.set_framesize(sensor.QQVGA)      # smaller resolution to go faster
sensor.set_pixformat(sensor.GRAYSCALE)

while True:
    img = sensor.snapshot()
    img.morph(kernel_size, kernel)      # convolve with the high-pass kernel
    img.binary(thresholds)              # keep only strong edge responses
    img.erode(1, threshold=2)           # erode pixels with less than 2 neighbors
from machine import I2C,UART
# fm (fpioa manager) is MaixPy's built-in pin-function router
# (translated from: 导入maipy的fm这个内置的对象, 用于后续注册工作).
from fpioa_manager import fm

# uart initial: route board pins 35/34 to UART2 RX/TX.
fm.register(35, fm.fpioa.UART2_RX, force=True)
fm.register(34, fm.fpioa.UART2_TX, force=True)
uart_Port = UART(UART.UART2, 115200, 8, 0, 0, timeout=1000, read_buf_len=4096)

clock = time.clock()
lcd.init()  # lcd initial

# set camera
sensor.reset()
sensor.set_pixformat(sensor.RGB565)  # RGB565
sensor.set_framesize(sensor.QQVGA)   # can be QVGA on M7...
sensor.set_vflip(1)
sensor.set_hmirror(1)
sensor.skip_frames(20)
sensor.run(1)
sensor.set_auto_gain(False)          # auto gain off (translated from: 自动增益off)

while(True):
    img = sensor.snapshot()
    fps = clock.fps()
    check = img.find_qrcodes()  # scan img, find qrcode
    if(len(check) > 0):
        # NOTE(review): chunk truncated — the except clause matching this try
        # lies beyond this view.
        try:
            tuplexboxa = check[0].rect()
            data0 = check[0].payload()
            img.draw_string(15, 20, "Scan Successfully!", (236, 36, 36), scale=1.0)
            img.draw_rectangle(tuplexboxa, (236, 36, 36))
import sensor, image, time, math

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)  # High Res!
sensor.set_windowing((320, 240))   # V Res of 80 == less work (40 for 2X the speed).
sensor.skip_frames(time=2000)
sensor.set_auto_gain(False)        # must turn this off to prevent image washout...
sensor.set_auto_whitebal(False)    # must turn this off to prevent image washout...
clock = time.clock()

# Barcode detection can run at the full 640x480 resolution of your OpenMV Cam's
# OV7725 camera module. It also works in RGB565 mode, but at a lower resolution.
# Barcodes want as many pixels as possible, so 640x480 grayscale is ideal.

while True:
    clock.tick()
    img = sensor.snapshot()
    img.lens_corr()
    codes = img.find_barcodes()
    for code in codes:
        img.draw_rectangle(code.rect())
        print("Payload \"%s\", rotation %f (degrees), quality %d, FPS %f"
              % (code.payload(), (180 * code.rotation()) / math.pi,
                 code.quality(), clock.fps()))
    if not codes:
        print("FPS %f" % clock.fps())
RED_THRESHOLD = ( 0, 50, -128, 30, -30, 31 ) #(5, 89, 35, 75, -8, 50)#(28, 86, 16, 82, 14, 63) # Grayscale threshold for dark things... import sensor, image, time, math, struct from pyb import UART from struct import pack, unpack import json sensor.reset() sensor.set_vflip(True) sensor.set_hmirror(True) sensor.set_pixformat(sensor.RGB565) sensor.set_framesize( sensor.QQVGA) # 80x60 (4,800 pixels) - O(N^2) max = 2,3040,000. sensor.set_auto_whitebal(False) # must be turned off for color tracking #sensor.set_windowing([0,20,80,40]) sensor.skip_frames(time=2000) # WARNING: If you use QQVGA it may take seconds clock = time.clock() # to process a frame sometimes. uart = UART(3, 115200) while (True): clock.tick() img = sensor.snapshot().binary([RED_THRESHOLD]) line = img.get_regression([(100, 100, 0, 0, 0, 0)], robust=True) if (line): rho_err = abs(line.rho()) - img.width() / 2 #计算一条直线与图像中央的距离 #坐标变换 xy轴的角度 if line.theta() > 90: theta_err = line.theta() - 180 else: theta_err = line.theta()
# Basic Frame Differencing Example # # Note: You will need an SD card to run this example. # # This example demonstrates using frame differencing with your OpenMV Cam. It's # called basic frame differencing because there's no background image update. # So, as time passes the background image may change resulting in issues. import sensor, image, pyb, os, time sensor.reset() # Initialize the camera sensor. sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others) sensor.skip_frames(time = 2000) # Let new settings take affect. sensor.set_auto_whitebal(False) # Turn off white balance. clock = time.clock() # Tracks FPS. if not "temp" in os.listdir(): os.mkdir("temp") # Make a temp directory print("About to save background image...") sensor.skip_frames(time = 2000) # Give the user time to get ready. sensor.snapshot().save("temp/bg.bmp") print("Saved background image - Now frame differencing!") while(True): clock.tick() # Track elapsed milliseconds between snapshots(). img = sensor.snapshot() # Take a picture and return the image. # Replace the image with the "abs(NEW-OLD)" frame difference. img.difference("temp/bg.bmp")
# NOTE(review): this chunk begins mid-scope — steering, throttle, steering_gain,
# uart, COLOR_LINE_FOLLOWING, FRAME_* and STEERING_SERVO_* are defined outside
# this view.
# Map steering from 0-180 degrees into the servo's pulse-width range.
steering = STEERING_SERVO_MIN_US + ((steering * (STEERING_SERVO_MAX_US - STEERING_SERVO_MIN_US + 1)) / 181)
# Blend toward the 1500us center point by the configured gain.
steering = steering + (1500 - steering) * steering_gain
print("Steering: ", round(steering), "Throttle: ", round(throttle))
# Send to the Arduino. It looks for channel outputs in order,
# separated by a "," and ended with a "\n".
uart.write(str(round(steering)))
uart.write(",")
uart.write(str(round(throttle)))
uart.write("\n")

#
# Camera Control Code
#

sensor.reset()
sensor.set_pixformat(sensor.RGB565 if COLOR_LINE_FOLLOWING else sensor.GRAYSCALE)
sensor.set_framesize(FRAME_SIZE)
sensor.set_vflip(True)
sensor.set_hmirror(True)
# Crop to a horizontally-widened band in the lower FRAME_REGION of the frame.
sensor.set_windowing((int((sensor.width() / 2) - ((sensor.width() / 2) * FRAME_WIDE)),
                      int(sensor.height() * (1.0 - FRAME_REGION)),
                      int((sensor.width() / 2) + ((sensor.width() / 2) * FRAME_WIDE)),
                      int(sensor.height() * FRAME_REGION)))
sensor.skip_frames(time=200)
# Auto gain/white-balance fight color tracking; disable them for color mode.
if COLOR_LINE_FOLLOWING:
    sensor.set_auto_gain(False)
if COLOR_LINE_FOLLOWING:
    sensor.set_auto_whitebal(False)
clock = time.clock()
#sensor.set_auto_exposure(False, \
#                         exposure_us = 300)

###########
# Loop
###########
# Optical Flow Example
#
# Measures the displacement between successive frames — the same way a laser
# mouse tracks movement — giving you instantaneous scene motion.

import sensor, image, time

sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)  # phase correlation wants grayscale
sensor.set_framesize(sensor.B64x32)     # or B40x30 or B64x64
clock = time.clock()

# find_displacement() takes the 2D FFTs of the old and new images and compares
# them using phase correlation. There is only enough memory on the Cam for two
# 64x64 FFTs (or 128x32, 32x128, or etc.).
old = sensor.snapshot()

while True:
    clock.tick()
    img = sensor.snapshot()
    [delta_x, delta_y, response] = old.find_displacement(img)
    old = img.copy()
    print("%0.1f X\t%0.1f Y\t%0.2f QoR\t%0.2f FPS" %
          (delta_x, delta_y, response, clock.fps()))
# This example shows off how to find lines in the image. For each line object # found in the image a line object is returned which includes the line's rotation. # Note: Line detection is done by using the Hough Transform: # http://en.wikipedia.org/wiki/Hough_transform # Please read about it above for more information on what `theta` and `rho` are. # find_lines() finds infinite length lines. Use find_line_segments() to find non-infinite lines. enable_lens_corr = False # turn on for straighter lines... import sensor, image, time sensor.reset() sensor.set_pixformat(sensor.GRAYSCALE) # grayscale is faster sensor.set_framesize(sensor.B64x32) sensor.skip_frames(time=2000) clock = time.clock() # All line objects have a `theta()` method to get their rotation angle in degrees. # You can filter lines based on their rotation angle. min_degree = 0 max_degree = 179 a1 = 0 b1 = 0 c1 = 0 a2 = 0 b2 = 0 c2 = 0
# Retry sensor init: it can fail transiently on this board, so attempt
# until it succeeds, flagging the failure on the LCD after 20 tries.
while 1:
    try:
        #sensor.reset() #Reset sensor may failed, let's try sometimes
        sensor.reset(freq=20000000, set_regs=True, dual_buff=True)
        break
    # FIX: narrowed from a bare `except:`, which also swallowed
    # KeyboardInterrupt/SystemExit and made the board un-interruptible.
    except Exception:
        err_counter = err_counter + 1
        if err_counter == 20:
            lcd.draw_string(lcd.width() // 2 - 100, lcd.height() // 2 - 4,
                            "Error: Sensor Init Failed", lcd.WHITE, lcd.RED)
        time.sleep(0.1)
        continue

sensor.run(0)
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)  # QVGA = 320x240
img_w = sensor.width()
img_h = sensor.height()
sensor_ID = sensor.get_id()
# FIX: provide a fallback name so the print below cannot raise NameError
# when the attached sensor is not an OV7740.
sensor_ID_str = str(sensor_ID)
if (sensor_ID == 30530):
    sensor_ID_str = 'OV7740'
print("image sensor is " + str(sensor_ID_str) + ", with size " + str(img_w) + " x " + str(img_h))
sensor.run(1)

task = kpu.load(0x300000)  # Load Model File from Flash
# Anchor data is for bbox, extracted from the training sets.
anchor = (1.889, 2.5245, 2.9465, 3.94056, 3.99987, 5.3658, 5.155437, 6.92275, 6.718375, 9.01025)
kpu.init_yolo2(task, 0.5, 0.3, 5, anchor)
from pyb import Timer
from pyb import LED
import json

# NOTE(review): `pyb` itself is referenced below but imported outside this chunk.
led = pyb.LED(3)  # Red LED = 1, Green LED = 2, Blue LED = 3, IR LEDs = 4.

# LAB color thresholds (comments translated from Chinese).
thresholds = [
    (27, 67, 19, 91, 45, 76),   # red
    #(21, 75, 3, -38, 34, 68),  # green
    (27, 90, -3, -28, 31, 125),
    (0, 30, 0, 64, -128, 0)     # generic_blue_thresholds
]
threshold_index = 1  # 0 for red, 1 for green, 2 for blue

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)  # 320*240
sensor.skip_frames(time=100)
sensor.set_auto_gain(False)        # must be turned off for color tracking
sensor.set_auto_whitebal(False)    # must be turned off for color tracking
clock = time.clock()

uart = UART(3, 115200)
uart.init(115200, bits=8, parity=None, stop=1, timeout_char=1000)  # init with the given parameters

def tick(timer):
    # Timer callback: we receive the timer object when being called.
    # NOTE(review): chunk truncated — blobs, output_str and data are
    # produced elsewhere in the script.
    global data
    if blobs:
        print("Find")
        print('you send:', output_str)
        uart.write(data)
# # Note: You will need an SD card to run this demo. # # You can use your OpenMV Cam to record mjpeg files. You can either feed the # recorder object JPEG frames or RGB565/Grayscale frames. Once you've finished # recording a Mjpeg file you can use VLC to play it. If you are on Ubuntu then # the built-in video player will work too. import sensor, image, time, mjpeg, pyb RED_LED_PIN = 1 BLUE_LED_PIN = 3 sensor.reset() # Initialize the camera sensor. sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others) sensor.skip_frames(time=2000) # Let new settings take affect. clock = time.clock() # Tracks FPS. pyb.LED(RED_LED_PIN).on() sensor.skip_frames(time=2000) # Give the user time to get ready. pyb.LED(RED_LED_PIN).off() pyb.LED(BLUE_LED_PIN).on() m = mjpeg.Mjpeg("example.mjpeg") print("You're on camera!") for i in range(200): clock.tick() m.add_frame(sensor.snapshot())
""" DCT_reconstruct.py - By: Keiko Nagami - Friday Mar 13 2020 - Reads bytearrays containing quantized DCT coefficients of five 8x8pixel tiles from text files - Reconstructs the Quantized DCT coefficient matrices for each tile and computes the inverse DCT to get raw bayer image data - Saves .png image tiles - Run this with camera board, WITH SD card plugged in, in OpenMV IDE """ # import relevant libraries import sensor, image, math, umatrix, ulinalg, ulab sensor.reset() # initialize the camera sensor sensor.set_pixformat(sensor.RGB565) # sensor.RGB565 takes RGB image sensor.set_framesize(sensor.VGA) # sensor.VGA takes 640x480 image sensor.skip_frames(time=2500) # let new settings take effect # Bayer filter uses RGGB pattern. Each set contains row indices that correspond to the R,G, or B set_R = [ 0, 4, 8, 12, ] # R set_G = [ 1, 2, 5, 6, 9, 10, 13,
import sensor, image, time, pyb
import my_ips, my_file, my_uart, my_key

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
# NOTE(review): 2 << 11 == 4096 — looks like a legacy register/bitmask style
# value rather than an FPS; confirm against this firmware's set_framerate() API.
sensor.set_framerate(2 << 11)
sensor.set_framesize(sensor.QQVGA)  # 160x120
sensor.skip_frames(time=1000)
sensor.set_auto_gain(False)
sensor.set_auto_whitebal(False)
sensor.skip_frames(time=500)
clock = time.clock()

ledR = pyb.LED(1)
ledG = pyb.LED(2)
ledB = pyb.LED(3)
ledB.on()

my_ips.init()
time.sleep(100)  # NOTE(review): sleep units (seconds vs ms) vary by port — confirm

colorxy = 0
img_cnt = 0

# LAB thresholds and regions of interest for the colored targets.
red_threshold = [12, 80, 16, 73, -1, 56]
blue_threshold = [0, 50, -128, 127, -128, -5]
black_threshold = [0, 15, -128, 127, -128, 127]
roiblue = [(0, 21, 33, 77), (12, 21, 21, 77), (50, 30, 15, 60)]

ledR.on()
ledG.on()

def find_max(blobs):
    # Track the largest blob seen so far.
    # NOTE(review): chunk truncated — the function body continues beyond this view.
    max_size = 0
    for blob in blobs:
# finding Apriltags in the image using blob tracking to find the # area of where the tag is first and then calling find_apriltags # on that blob. # Note, this script works well assuming most parts of the image do not # pass the thresholding test... otherwise, you don't get a distance # benefit. import sensor, image, time, math, omv # Set the thresholds to find a white object (i.e. tag border) thresholds = (150, 255) sensor.reset() sensor.set_pixformat(sensor.GRAYSCALE) if omv.board_type() == "H7": sensor.set_framesize(sensor.VGA) elif omv.board_type() == "M7": sensor.set_framesize(sensor.QVGA) else: raise Exception("You need a more powerful OpenMV Cam to run this script") sensor.skip_frames(time = 200) # increase this to let the auto methods run for longer sensor.set_auto_gain(False) # must be turned off for color tracking sensor.set_auto_whitebal(False) # must be turned off for color tracking clock = time.clock() # The apriltag code supports up to 6 tag families which can be processed at the same time. # Returned tag objects will have their tag family and id within the tag family. tag_families = 0 tag_families |= image.TAG16H5 # comment out to disable this family tag_families |= image.TAG25H7 # comment out to disable this family tag_families |= image.TAG25H9 # comment out to disable this family tag_families |= image.TAG36H10 # comment out to disable this family tag_families |= image.TAG36H11 # comment out to disable this family (default family)
# Global Shutter Triggered Mode Example
#
# Puts the global shutter camera into triggered mode so that snapshot()
# controls EXACTLY when pixel integration starts, letting you sync capture
# to external movement. Because a global shutter captures every pixel at
# once (unlike a rolling shutter), motion appears only for the integration
# time rather than integration time multiplied by the row count. Noise also
# drops, since rows are read out only after exposure completes.
#
# The trade-off: expect 2-3x lower frame rate, because frames are no longer
# generated continuously and readout must wait for integration to finish.

import sensor, image, time

sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.VGA)   # 640x480
sensor.skip_frames(time=2000)      # Wait for settings to take effect.
clock = time.clock()

sensor.ioctl(sensor.IOCTL_SET_TRIGGERED_MODE, True)

while True:
    clock.tick()
    frame = sensor.snapshot()
    # FPS roughly doubles once disconnected from the IDE.
    print(clock.fps())
import sensor, image, time
from pyb import UART, Pin, Timer
import pyb
import utime

# LAB color-tracking thresholds: (L min, L max, A min, A max, B min, B max).
r_b = (45, 80, 40, 75, -50, -25)  # red target
y_b = (60, 80, 0, 40, 55, 85)     # yellow target

##### Camera initialization #####
sensor.reset()                        # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565)   # use RGB565.
sensor.set_framesize(sensor.QVGA)     # use QVGA.
sensor.skip_frames(10)                # Let new settings take affect.
sensor.set_auto_whitebal(False)       # turn this off (required for color tracking).
clock = time.clock()                  # Tracks FPS.

##### Variable initialization #####
uart = UART(3, 115200)  # serial link for sending tracking results
x = 0
y = 0
last_x = 0
last_y = 0

#########################
# PID gains for the X and Y axes (proportional, integral, derivative-time).
Kp_x = 0.130  #############
I_x = 0.018   #############
Td_x = 0.08   ###############
Kp_y = 0.135  ###############
I_y = 0.018   #############
Td_y = 0.10   #############
#########################
#########################
# AprilTags Example # # This example shows the power of the OpenMV Cam to detect April Tags # on the OpenMV Cam M7. The M4 versions cannot detect April Tags. import sensor, image, time, math sensor.reset() sensor.set_pixformat(sensor.RGB565) sensor.set_framesize(sensor.VGA) # we run out of memory if the resolution is much bigger... sensor.set_windowing((160, 120)) # Look at center 160x120 pixels of the VGA resolution. sensor.skip_frames(time = 2000) sensor.set_auto_gain(False) # must turn this off to prevent image washout... sensor.set_auto_whitebal(False) # must turn this off to prevent image washout... clock = time.clock() # Note! Unlike find_qrcodes the find_apriltags method does not need lens correction on the image to work. # What's the difference between tag families? Well, for example, the TAG16H5 family is effectively # a 4x4 square tag. So, this means it can be seen at a longer distance than a TAG36H11 tag which # is a 6x6 square tag. However, the lower H value (H5 versus H11) means that the false positve # rate for the 4x4 tag is much, much, much, higher than the 6x6 tag. So, unless you have a # reason to use the other tags families just use TAG36H11 which is the default family. while(True): clock.tick() img = sensor.snapshot() for tag in img.find_apriltags(): # defaults to TAG36H11 img.draw_rectangle(tag.rect(), color = (255, 0, 0)) img.draw_cross(tag.cx(), tag.cy(), color = (0, 255, 0)) print_args = (tag.id(), (180 * tag.rotation()) / math.pi)
# Blink the blue LED in a distinctive pattern until the USB VCP link to the
# host is up, then configure the sensor for AprilTag detection.
led = pyb.LED(3)
usb = pyb.USB_VCP()
while (usb.isconnected() == False):
    # NOTE(review): these delays look like milliseconds (old OpenMV firmware
    # time.sleep took ms); on current firmware time.sleep takes seconds and
    # time.sleep_ms(150) would be the ms form — confirm target firmware.
    led.on()
    time.sleep(150)
    led.off()
    time.sleep(100)
    led.on()
    time.sleep(150)
    led.off()
    time.sleep(600)

sensor.reset()  # Reset and initialize the sensor.
sensor.set_pixformat(
    sensor.RGB565)  # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.QQVGA)  # Set frame size to QQVGA for april tags
sensor.skip_frames(time=2000)  # Wait for settings take effect.

# AprilTags Test Measurements

sensor.set_auto_gain(False)  # must turn this off to prevent image washout...
sensor.set_auto_whitebal(
    False)  # must turn this off to prevent image washout...
clock = time.clock()

# Note! Unlike find_qrcodes the find_apriltags method does not need lens correction on the image to work.

# What's the difference between tag families? Well, for example, the TAG16H5 family is effectively
# a 4x4 square tag. So, this means it can be seen at a longer distance than a TAG36H11 tag which
# is a 6x6 square tag. However, the lower H value (H5 versus H11) means that the false positve
# rate for the 4x4 tag is much, much, much, higher than the 6x6 tag. So, unless you have a
# reason to use the other tags families just use TAG36H11 which is the default family.
# Display on self.write_command(0x29) if __name__ == "__main__": import sensor, time #from lcd import LCD # Reset sensor sensor.reset() # Sensor settings sensor.set_contrast(2) sensor.set_brightness(0) sensor.set_saturation(2) sensor.set_pixformat(sensor.RGB565) # LCD resolution (128x160) sensor.set_framesize(sensor.QQVGA2) # Init LCD lcd = LCD() lcd.clear(0x00) lcd.set_backlight(True) clock = time.clock() while (True): clock.tick() # Capture a frame a draw it to LCD lcd.write_image(sensor.snapshot()) print(clock.fps())
# Bring the LCD controller out of reset and run its power-up command
# sequence, then stream camera frames to it.
rst.high()
time.sleep(100)

write_command(0x11)  # Sleep Exit
time.sleep(120)

# Memory Data Access Control
write_command(0x36, 0xC0)

# Interface Pixel Format
write_command(0x3A, 0x05)

# Display On
write_command(0x29)

sensor.reset()                        # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565)   # must be this
sensor.set_framesize(sensor.QQVGA2)   # must be this (matches the LCD panel)
sensor.skip_frames(time = 2000)       # Let new settings take affect.
clock = time.clock()                  # Tracks FPS.

while(True):
    clock.tick()  # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot()  # Take a picture and return the image.
    write_command(0x2C)  # Write image command...
    write_image(img)
    print(clock.fps())  # Note: Your OpenMV Cam runs about half as fast while
    # connected to your computer. The FPS should increase once disconnected.
# QRCode Example
#
# Detects QR Codes with the OpenMV Cam using software lens correction
# (see the qrcodes_with_lens_corr.py script for higher performance).

import sensor, image, time

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QQVGA)   # can be QVGA on M7...
sensor.skip_frames(time = 2000)
sensor.set_auto_gain(False)          # must turn this off to prevent image washout...
clock = time.clock()

while True:
    clock.tick()
    img = sensor.snapshot()
    # Undo barrel distortion so the straight QR grid lines stay straight;
    # a strength of 1.8 is good for the 2.8mm lens.
    img.lens_corr(1.8)
    for qr in img.find_qrcodes():
        img.draw_rectangle(qr.rect(), color = (255, 0, 0))
        print(qr)
    print(clock.fps())
import sensor, image, time

# Color tracking works best in a very tightly controlled, constant-lighting
# environment. If you need to track more than two colors, keep each
# threshold range narrow — tracking "generally red, green, and blue" ends up
# matching everything, which is not what you want.

red_threshold  = ( 40,  60,  60,  90,  50,  70)
blue_threshold = (  0,  20, -10,  30, -60,  10)
# You may need to tweak the above settings for tracking red and blue things...
# Select an area in the Framebuffer to copy the color settings.

sensor.reset()                        # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565)   # use RGB565.
sensor.set_framesize(sensor.QQVGA)    # use QQVGA for speed.
sensor.skip_frames(10)                # Let new settings take affect.
sensor.set_whitebal(False)            # turn this off.
clock = time.clock()                  # Tracks FPS.

while True:
    clock.tick()                      # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot()           # Take a picture and return the image.

    blobs = img.find_blobs([red_threshold, blue_threshold])
    merged_blobs = img.find_markers(blobs)
    if merged_blobs:
        for marker in merged_blobs:
            # Outline the marker's bounding box and mark its centroid.
            img.draw_rectangle(marker[0:4])   # rect
            img.draw_cross(marker[5], marker[6])  # cx, cy
# Text Drawing
#
# Demonstrates drawing text on the OpenMV Cam: each frame, ten strings are
# rendered at random (possibly off-screen) positions in random bright colors.

import sensor, image, time, pyb

sensor.reset()
sensor.set_pixformat(sensor.RGB565)  # or GRAYSCALE...
sensor.set_framesize(sensor.QVGA)    # or QQVGA...
sensor.skip_frames(time=2000)
clock = time.clock()

while True:
    clock.tick()
    img = sensor.snapshot()

    for _ in range(10):
        # Positions span twice the frame size, offset by half a frame, so
        # strings can land partially (or entirely) outside the image.
        pos_x = (pyb.rng() % (2 * img.width())) - (img.width() // 2)
        pos_y = (pyb.rng() % (2 * img.height())) - (img.height() // 2)
        # Each channel in 128..254 keeps the text bright.
        red = (pyb.rng() % 127) + 128
        green = (pyb.rng() % 127) + 128
        blue = (pyb.rng() % 127) + 128
        # If the first argument is a scalar then this method expects
        # to see x, y, and text. Otherwise, it expects a (x,y,text) tuple.
        img.draw_string(pos_x, pos_y, "Hello World!",
                        color=(red, green, blue), scale=2)

    print(clock.fps())
# NOTE!!! You have to use a small power of 2 resolution when using # find_displacement(). This is because the algorithm is powered by # something called phase correlation which does the image comparison # using FFTs. A non-power of 2 resolution requires padding to a power # of 2 which reduces the usefulness of the algorithm results. Please # use a resolution like B128X128 or B128X64 (2x faster). # Your OpenMV Cam supports power of 2 resolutions of 64x32, 64x64, # 128x64, and 128x128. If you want a resolution of 32x32 you can create # it by doing "img.pool(2, 2)" on a 64x64 image. sensor.reset() # Reset and initialize the sensor. sensor.set_pixformat( sensor.GRAYSCALE) # Set pixel format to GRAYSCALE (or RGB565) sensor.set_framesize( sensor.B128X128) # Set frame size to 128x128... (or 128x64)... sensor.skip_frames(time=2000) # Wait for settings take effect. clock = time.clock() # Create a clock object to track the FPS. # Take from the main frame buffer's RAM to allocate a second frame buffer. # There's a lot more RAM in the frame buffer than in the MicroPython heap. # However, after doing this you have a lot less RAM for some algorithms... # So, be aware that it's a lot easier to get out of RAM issues now. extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.GRAYSCALE) extra_fb.replace(sensor.snapshot()) while (True): clock.tick() # Track elapsed milliseconds between snapshots(). img = sensor.snapshot() # Take a picture and return the image.
import sensor, image, time, math # NOTE!!! You have to use a small power of 2 resolution when using # find_displacement(). This is because the algorithm is powered by # something called phase correlation which does the image comparison # using FFTs. A non-power of 2 resolution requires padding to a power # of 2 which reduces the usefulness of the algorithm results. Please # use a resolution like B64X64 or B64X32 (2x faster). # Your OpenMV Cam supports power of 2 resolutions of 64x32, 64x64, # 128x64, and 128x128. If you want a resolution of 32x32 you can create # it by doing "img.pool(2, 2)" on a 64x64 image. sensor.reset() # Reset and initialize the sensor. sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE) sensor.set_framesize(sensor.B64X64) # Set frame size to 64x64... (or 64x32)... sensor.skip_frames(time = 2000) # Wait for settings take effect. clock = time.clock() # Create a clock object to track the FPS. # Take from the main frame buffer's RAM to allocate a second frame buffer. # There's a lot more RAM in the frame buffer than in the MicroPython heap. # However, after doing this you have a lot less RAM for some algorithms... # So, be aware that it's a lot easier to get out of RAM issues now. extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.RGB565) extra_fb.replace(sensor.snapshot()) while(True): clock.tick() # Track elapsed milliseconds between snapshots(). img = sensor.snapshot() # Take a picture and return the image. # This algorithm is hard to test without a perfect jig... So, here's a cheat to see it works.
# AprilTags Example # # This example shows the power of the OpenMV Cam to detect April Tags # on the OpenMV Cam M7. The M4 versions cannot detect April Tags. import sensor, image, time, math sensor.reset() sensor.set_pixformat(sensor.RGB565) sensor.set_framesize( sensor.QQVGA) # we run out of memory if the resolution is much bigger... sensor.skip_frames(time=2000) sensor.set_auto_gain(False) # must turn this off to prevent image washout... sensor.set_auto_whitebal( False) # must turn this off to prevent image washout... clock = time.clock() # Note! Unlike find_qrcodes the find_apriltags method does not need lens correction on the image to work. # What's the difference between tag families? Well, for example, the TAG16H5 family is effectively # a 4x4 square tag. So, this means it can be seen at a longer distance than a TAG36H11 tag which # is a 6x6 square tag. However, the lower H value (H5 versus H11) means that the false positve # rate for the 4x4 tag is much, much, much, higher than the 6x6 tag. So, unless you have a # reason to use the other tags families just use TAG36H11 which is the default family. # The AprilTags library outputs the pose information for tags. This is the x/y/z translation and # x/y/z rotation. The x/y/z rotation is in radians and can be converted to degrees. As for # translation the units are dimensionless and you must apply a conversion function. # f_x is the x focal length of the camera. It should be equal to the lens focal length in mm # divided by the x sensor size in mm times the number of pixels in the image.
import sensor, time, pyb

# RGB LEDs used to signal which color blob (if any) was detected.
led_r = pyb.LED(1)
led_g = pyb.LED(2)
led_b = pyb.LED(3)

#sensor.reset()
sensor.set_contrast(2)
sensor.set_framesize(sensor.QCIF)
sensor.set_pixformat(sensor.RGB565)

clock = time.clock()
while (True):
    clock.tick()
    # Take snapshot
    image = sensor.snapshot()
    # Threshold image with RGB: keep pixels within distance 80 of pure
    # red, green, or blue.
    binary = image.threshold([(255, 0, 0), (0, 255, 0), (0, 0, 255)], 80)
    # Image closing: dilate then erode to fill small holes in the mask.
    binary.dilate(3)
    binary.erode(3)
    # Detect blobs in image
    blobs = binary.find_blobs()
    led_r.off()
    led_g.off()
# The below thresholds track in general red/green things. You may wish to tune them... thresholds = [ (30, 100, 15, 127, 15, 127), # generic_red_thresholds -> index is 0 so code == (1 << 0) (30, 100, -64, -8, -32, 32), # generic_green_thresholds -> index is 1 so code == (1 << 1) (0, 15, 0, 40, -80, -20) ] # generic_blue_thresholds -> index is 2 so code == (1 << 2) # Codes are or'ed together when "merge=True" for "find_blobs". uart = UART(3, 115200, timeout_char=1000) uart.init(115200, bits=8, parity=None, stop=1, timeout_char=1000) sensor.reset() sensor.set_pixformat(sensor.RGB565) sensor.set_framesize(sensor.QQVGA) #QQVGA = frame of 160x 120 # QVGA = frame of 320x240 sensor.skip_frames(time=2000) sensor.set_auto_gain(False) # must be turned off for color tracking sensor.set_auto_whitebal(False) # must be turned off for color tracking clock = time.clock() # OpenMV Cam Ground - Arduino Ground while (True): clock.tick() img = sensor.snapshot() chord = "0" #takes a snapshot -> returns class image #findblobs takes class image, and returns a list of class blob for blob in img.find_blobs(thresholds,
# Temperature range (in Celsius) that the Lepton maps onto 0..255 grayscale.
min_temp_in_celsius = 20.0
max_temp_in_celsius = 35.0

print("Resetting Lepton...")
# These settings are applied on reset
sensor.reset()
sensor.ioctl(sensor.IOCTL_LEPTON_SET_MEASUREMENT_MODE, True)
sensor.ioctl(sensor.IOCTL_LEPTON_SET_MEASUREMENT_RANGE,
             min_temp_in_celsius, max_temp_in_celsius)
print("Lepton Res (%dx%d)" % (sensor.ioctl(sensor.IOCTL_LEPTON_GET_WIDTH),
                              sensor.ioctl(sensor.IOCTL_LEPTON_GET_HEIGHT)))
print("Radiometry Available: " +
      ("Yes" if sensor.ioctl(sensor.IOCTL_LEPTON_GET_RADIOMETRY) else "No"))

sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.LCD)
sensor.skip_frames(time=5000)
clock = time.clock()
# NOTE(review): lcd appears to be imported/initialized earlier in the file —
# confirm the lcd module is in scope here.
lcd.init()

# Only blobs that with more pixels than "pixel_threshold" and more area than "area_threshold" are
# returned by "find_blobs" below. Change "pixels_threshold" and "area_threshold" if you change the
# camera resolution. "merge=True" merges all overlapping blobs in the image.

def map_g_to_temp(g):
    # Linearly map a grayscale value 0..255 back to degrees Celsius within
    # the configured measurement range.
    return ((g * (max_temp_in_celsius - min_temp_in_celsius)) / 255.0) + min_temp_in_celsius

while (True):
# Untitled - By: Administrator - pi 12 7 2018 import sensor import image import time color = [[]] tres = [0, 0, 0, 0, 0, 0] sensor.reset() sensor.set_pixformat(sensor.RGB565) sensor.set_framesize(sensor.CIF) sensor.skip_frames(time=2000) sensor.set_auto_gain(False) # must be turned off for color tracking sensor.set_auto_whitebal(False) # must be turned off for color tracking clock = time.clock() img = sensor.snapshot() img.draw_circle(177, 144, 75) img.draw_rectangle(123, 91, 106, 106) img = sensor.snapshot() tres[0] = image.rgb_to_lab(img.get_pixel(177, 144))[0] tres[1] = image.rgb_to_lab(img.get_pixel(177, 144))[0] tres[2] = image.rgb_to_lab(img.get_pixel(177, 144))[1] tres[3] = image.rgb_to_lab(img.get_pixel(177, 144))[1] tres[4] = image.rgb_to_lab(img.get_pixel(177, 144))[2] tres[5] = image.rgb_to_lab(img.get_pixel(177, 144))[2] # for blob in img.find_blobs(color, pixels_threshold=100, area_threshold=100, merge=True):
# CIFAR-10 Search Just Center Example # # CIFAR is a convolutional nueral network designed to classify it's field of view into several # different object types and works on RGB video data. # # In this example we slide the LeNet detector window over the image and get a list of activations # where there might be an object. Note that use a CNN with a sliding window is extremely compute # expensive so for an exhaustive search do not expect the CNN to be real-time. import sensor, image, time, os, nn sensor.reset() # Reset and initialize the sensor. sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE) sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240) sensor.set_windowing((128, 128)) # Set 128x128 window. sensor.skip_frames(time=750) # Don't let autogain run very long. sensor.set_auto_gain(False) # Turn off autogain. sensor.set_auto_exposure(False) # Turn off whitebalance. # Load cifar10 network (You can get the network from OpenMV IDE). net = nn.load('/cifar10.network') # Faster, smaller and less accurate. # net = nn.load('/cifar10_fast.network') labels = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'] clock = time.clock() while(True): clock.tick() img = sensor.snapshot()
# Camera/Hardware Objects: fmt = sensor.RGB565 res = sensor.QVGA led1 = pyb.LED(1) led2 = pyb.LED(2) # Get Camera ID: file = open("camId.txt") cam = int(file.readline()) file.close() # Set Up Sensor: sensor.reset() sensor.set_pixformat(fmt) sensor.set_framesize(res) sensor.set_brightness(-1) sensor.set_saturation(1) led1.on() sensor.skip_frames(time=1500) led1.off() sensor.set_auto_gain(False) # must be turned off for color tracking sensor.set_auto_whitebal(False) # must be turned off for color tracking # Set Up Packets: blobPacket = {"cx": 0, "cy": 0, "w": 0, "h": 0, "pixels": 0, "color": 0} startOfPacket = { "cam": cam, "time": pyb.elapsed_millis(0), "fmt": fmt, "height": sensor.height(),
# This example shows off single color code tracking using the OpenMV Cam. # # A color code is a blob composed of two or more colors. The example below will # only track colored objects which have both the colors below in them. import sensor, image, time, math # Color Tracking Thresholds (L Min, L Max, A Min, A Max, B Min, B Max) # The below thresholds track in general red/green things. You may wish to tune them... thresholds = [(30, 100, 15, 127, 15, 127), # generic_red_thresholds -> index is 0 so code == (1 << 0) (30, 100, -64, -8, -32, 32)] # generic_green_thresholds -> index is 1 so code == (1 << 1) # Codes are or'ed together when "merge=True" for "find_blobs". sensor.reset() sensor.set_pixformat(sensor.RGB565) sensor.set_framesize(sensor.QVGA) sensor.skip_frames(time = 2000) sensor.set_auto_gain(False) # must be turned off for color tracking sensor.set_auto_whitebal(False) # must be turned off for color tracking clock = time.clock() # Only blobs that with more pixels than "pixel_threshold" and more area than "area_threshold" are # returned by "find_blobs" below. Change "pixels_threshold" and "area_threshold" if you change the # camera resolution. "merge=True" must be set to merge overlapping color blobs for color codes. while(True): clock.tick() img = sensor.snapshot() for blob in img.find_blobs(thresholds, pixels_threshold=100, area_threshold=100, merge=True): if blob.code() == 3: # r/g code == (1 << 1) | (1 << 0) # These values depend on the blob not being circular - otherwise they will be shaky.
# Barcode Example # # This example shows off how easy it is to detect bar codes using the # OpenMV Cam M7. Barcode detection does not work on the M4 Camera. import sensor, image, time, math sensor.reset() sensor.set_pixformat(sensor.GRAYSCALE) sensor.set_framesize(sensor.VGA) # High Res! sensor.set_windowing((640, 80)) # V Res of 80 == less work (40 for 2X the speed). sensor.skip_frames(time = 2000) sensor.set_auto_gain(False) # must turn this off to prevent image washout... sensor.set_auto_whitebal(False) # must turn this off to prevent image washout... clock = time.clock() # Barcode detection can run at the full 640x480 resolution of your OpenMV Cam's # OV7725 camera module. Barcode detection will also work in RGB565 mode but at # a lower resolution. That said, barcode detection requires a higher resolution # to work well so it should always be run at 640x480 in grayscale... def barcode_name(code): if(code.type() == image.EAN2): return "EAN2" if(code.type() == image.EAN5): return "EAN5" if(code.type() == image.EAN8): return "EAN8" if(code.type() == image.UPCE): return "UPCE" if(code.type() == image.ISBN10):