def main(model_addr=0x300000, lcd_rotation=0, sensor_hmirror=False, sensor_vflip=False):
    """Run YOLO2 face detection with a model in flash and show boxes on the LCD.

    model_addr: flash address of the kmodel.
    lcd_rotation / sensor_hmirror / sensor_vflip: display and camera orientation.
    Loops until an exception occurs; always releases the KPU task.
    """
    # Camera bring-up.
    sensor.reset()
    sensor.set_pixformat(sensor.RGB565)
    sensor.set_framesize(sensor.QVGA)
    sensor.set_hmirror(sensor_hmirror)
    sensor.set_vflip(sensor_vflip)
    sensor.run(1)
    # Display bring-up.
    lcd.init(type=1)
    lcd.rotation(lcd_rotation)
    lcd.clear(lcd.WHITE)
    task = kpu.load(model_addr)
    anchors = (1.889, 2.5245, 2.9465, 3.94056, 3.99987,
               5.3658, 5.155437, 6.92275, 6.718375, 9.01025)
    kpu.init_yolo2(task, 0.5, 0.3, 5, anchors)  # threshold:[0,1], nms_value: [0, 1]
    try:
        while True:
            frame = sensor.snapshot()
            start = time.ticks_ms()
            detections = kpu.run_yolo2(task, frame)
            elapsed = time.ticks_ms() - start
            if detections:
                for det in detections:
                    frame.draw_rectangle(det.rect())
            frame.draw_string(0, 200, "t:%dms" % (elapsed), scale=2)
            lcd.display(frame)
    except Exception as e:
        sys.print_exception(e)
    finally:
        kpu.deinit(task)
def init_sensor():
    """Configure the camera for AprilTag work: low resolution, vertically flipped."""
    sensor.reset()
    sensor.set_pixformat(sensor.RGB565)
    # aprilTag detection works with low resolutions
    sensor.set_framesize(sensor.QQVGA)
    sensor.skip_frames(60)
    sensor.set_vflip(1)  # flips the image
    sensor.run(1)
def run(argv):
    """Dispatch on mode: 0 -> arrow detection, anything else -> signal checking."""
    mode = argv
    if mode == 0:  # check Arrow
        sensor.reset()
        # Previously tried settings, kept for reference:
        #   sensor.set_auto_gain(False)
        #   sensor.set_contrast(1)
        #   sensor.set_gainceiling(16)
        #   sensor.set_windowing((200, 200))  # 240x240 center pixels of VGA
        #   sensor.set_framesize(sensor.QQVGA)
        #   sensor.set_pixformat(sensor.GRAYSCALE)
        #   sensor.set_auto_whitebal(False)
        sensor.set_pixformat(sensor.GRAYSCALE)
        sensor.set_framesize(sensor.QQVGA)
        sensor.set_vflip(True)
        sensor.set_hmirror(True)
        sensor.skip_frames(time=2000)
        findArrow()
    else:  # check signal mode
        sensor.reset()
        sensor.set_auto_gain(False)
        sensor.set_auto_whitebal(True)
        sensor.set_contrast(-3)
        sensor.set_brightness(-3)
        sensor.set_gainceiling(8)
        sensor.set_pixformat(sensor.RGB565)
        sensor.set_vflip(True)
        sensor.set_framesize(sensor.VGA)
        sensor.set_windowing((240, 240))  # 240x240 center pixels of VGA
        # sensor.set_windowing((200, 200))  # 200x200 center pixels of VGA
        sensor.skip_frames(time=800)
        checkSignal()
def main(labels=None, model_addr="/sd/m.kmodel", sensor_window=(224, 224), lcd_rotation=0, sensor_hmirror=False, sensor_vflip=False):
    """Run an image-classification kmodel and display the top-1 label on the LCD.

    labels: list of class-name strings; when falsy they are loaded from
            'labels.txt', a python snippet that assigns `labels = [...]`.
    model_addr: kmodel path on SD (or flash address).
    Returns 1 if no labels could be obtained; otherwise loops until an
    exception is raised.  The KPU task is always released.
    """
    sensor.reset()
    sensor.set_pixformat(sensor.RGB565)
    sensor.set_framesize(sensor.QVGA)
    sensor.set_windowing(sensor_window)
    sensor.set_hmirror(sensor_hmirror)
    sensor.set_vflip(sensor_vflip)
    sensor.run(1)
    lcd.init(type=1)
    lcd.rotation(lcd_rotation)
    lcd.clear(lcd.WHITE)
    if not labels:
        # BUG FIX: `exec(f.read())` inside a function cannot rebind the local
        # `labels`, so the original code never picked up the loaded labels.
        # Execute the snippet in an explicit namespace and read the binding
        # back out.  NOTE: exec of SD-card content is inherently trusting the
        # card's contents — kept for compatibility with existing labels.txt.
        ns = {}
        with open('labels.txt', 'r') as f:
            exec(f.read(), ns)
        labels = ns.get('labels')
    if not labels:
        print("no labels.txt")
        img = image.Image(size=(320, 240))
        img.draw_string(90, 110, "no labels.txt", color=(255, 0, 0), scale=2)
        lcd.display(img)
        return 1
    try:
        img = image.Image("startup.jpg")
        lcd.display(img)
    except Exception:
        # No startup picture on the filesystem; show a plain loading screen.
        img = image.Image(size=(320, 240))
        img.draw_string(90, 110, "loading model...", color=(255, 255, 255), scale=2)
        lcd.display(img)
    task = kpu.load(model_addr)
    try:
        while (True):
            img = sensor.snapshot()
            t = time.ticks_ms()
            fmap = kpu.forward(task, img)
            t = time.ticks_ms() - t
            plist = fmap[:]
            pmax = max(plist)
            max_index = plist.index(pmax)
            img.draw_string(0, 0, "%.2f : %s" % (pmax, labels[max_index].strip()), scale=2)
            img.draw_string(0, 200, "t:%dms" % (t), scale=2)
            lcd.display(img)
    except Exception as e:
        raise e
    finally:
        kpu.deinit(task)
def __init__(self, out_range=10, ignore_limit=0.02, hmirror=False, vflip=False, lcd_rotation=2, lcd_mirror=True):
    """Initialize face-tracking state, the KPU face model, the LCD and the camera.

    out_range: output range for the tracking angles.
    ignore_limit: dead-band below which movement is ignored.
    """
    self.pitch = 0
    self.roll = 0
    self.out_range = out_range
    self.ignore = ignore_limit
    self.task_fd = kpu.load(0x300000)  # face model addr in flash
    anchor = (1.889, 2.5245, 2.9465, 3.94056, 3.99987,
              5.3658, 5.155437, 6.92275, 6.718375, 9.01025)
    kpu.init_yolo2(self.task_fd, 0.5, 0.3, 5, anchor)
    # Display setup.
    lcd.init()
    lcd.rotation(lcd_rotation)
    lcd.mirror(lcd_mirror)
    # Camera setup.
    sensor.reset()
    sensor.set_pixformat(sensor.RGB565)
    sensor.set_framesize(sensor.QVGA)
    if hmirror:
        sensor.set_hmirror(1)
    if vflip:
        sensor.set_vflip(1)
def check(self):
    """Poll the buttons and, on first use, bring up the rear camera sensor.

    Any failure is recorded in the global Report object rather than raised.
    """
    try:
        self.btn.expand_event()
        if self.btn.home() == 2:
            sipeed_led.w.value(0)
            Report.RearSensor_Test = True
            sample_page.next()
        if not self.isconnected:
            try:
                # Lazy one-time sensor bring-up.
                sensor.reset()
                sensor.set_pixformat(sensor.YUV422)
                sensor.set_framesize(sensor.QVGA)
                sensor.set_hmirror(1)
                sensor.set_vflip(1)
                sensor.run(1)
                sensor.skip_frames()
                self.isconnected = True
                sipeed_led.w.value(0)
            except Exception as e:
                Report.RearSensor_Test = False
                Report.isError = str(e)
                print(e)
    except Exception as e:
        Report.RearSensor_Test = False
        Report.isError = str(e)
        print(e)
def reset_sensor(self):
    """Reset and configure the camera (QVGA RGB565, rotated 180 degrees)."""
    sensor.reset()
    sensor.set_pixformat(sensor.RGB565)
    sensor.set_framesize(sensor.QVGA)
    # Flip both axes: camera is mounted upside down.
    sensor.set_vflip(True)
    sensor.set_hmirror(True)
    self.logger.trace("skipping frames...")
    sensor.skip_frames(time=2000)
def init():
    """Initialize the camera, using double buffering when `obj` requests it."""
    sensor.reset(dual_buff=obj.is_dual_buff)
    sensor.set_pixformat(sensor.RGB565)
    sensor.set_framesize(sensor.QVGA)
    # Camera is mounted rotated 180 degrees.
    sensor.set_hmirror(1)
    sensor.set_vflip(1)
    sensor.run(1)
    sensor.skip_frames()
def init_sensor():
    """Initialize the LCD and the camera, cropped to a square window."""
    lcd.init()
    sensor.reset()
    sensor.set_pixformat(sensor.RGB565)
    sensor.set_framesize(sensor.QVGA)  # VGA is over
    sensor.set_hmirror(1)
    sensor.set_windowing((WINDOW_SIZE, WINDOW_SIZE))
    sensor.set_vflip(1)
    sensor.run(1)
def init():
    """Initialize the camera at QVGA cropped to 320x224, rotated 180 degrees.

    NOTE(review): references `__class__`, so this presumably lives inside a
    class body (e.g. as a staticmethod) not visible in this chunk — confirm.
    """
    sensor.reset(dual_buff=__class__.is_dual_buff)
    sensor.set_pixformat(sensor.RGB565)
    sensor.set_framesize(sensor.QVGA)
    sensor.set_windowing((320, 224))
    sensor.set_hmirror(1)
    sensor.set_vflip(1)
    sensor.run(1)
    sensor.skip_frames()
def init(self, inverse):
    """Configure the camera for color tracking; flip vertically when `inverse`."""
    sensor.reset()
    sensor.set_pixformat(sensor.RGB565)
    sensor.set_framesize(sensor.QVGA)
    sensor.skip_frames(time=1000)
    # sensor.set_brightness(0)
    # sensor.set_auto_exposure(False, 1800)  # reduce camera exposure
    sensor.set_auto_gain(False)  # must be turned off for color tracking
    sensor.set_auto_whitebal(False)  # must be turned off for color tracking
    if inverse:
        sensor.set_vflip(True)
def reset_sensor():
    """Reset the camera for line following and crop to the region of interest.

    The window is centered horizontally (scaled by FRAME_WIDE) and anchored at
    the bottom of the frame (scaled by FRAME_REGION, minus BOTTOM_PX_TO_REMOVE).
    """
    sensor.reset()
    sensor.set_pixformat(sensor.RGB565 if COLOR_LINE_FOLLOWING else sensor.GRAYSCALE)
    sensor.set_framesize(FRAME_SIZE)
    # Camera is mounted upside down.
    sensor.set_vflip(True)
    sensor.set_hmirror(True)
    half_width = sensor.width() / 2
    window = (int(half_width - half_width * FRAME_WIDE),
              int(sensor.height() * (1.0 - FRAME_REGION)),
              int(half_width + half_width * FRAME_WIDE),
              int(sensor.height() * FRAME_REGION) - BOTTOM_PX_TO_REMOVE)
    sensor.set_windowing(window)
    sensor.skip_frames(time=200)
    if COLOR_LINE_FOLLOWING:
        sensor.set_auto_gain(False)
    if COLOR_LINE_FOLLOWING:
        sensor.set_auto_whitebal(False)
def camera_init():
    """Initialize the camera; orientation depends on which sensor chip is fitted."""
    sensor.reset()
    sensor.set_pixformat(sensor.RGB565)
    sensor.set_framesize(sensor.QVGA)
    # ov2640 id:9794, ov5642 id:22082
    if sensor.get_id() == 9794:
        sensor.set_hmirror(1)
        sensor.set_vflip(1)
    else:
        sensor.set_hmirror(0)
        sensor.set_vflip(1)
        lcd.rotation(1)
def shoot(shutdown = False):
    """Capture one frame; optionally power the sensor down afterwards.

    Skips re-initialization when the camera is already on (Camera.isOn).
    Returns the captured image.
    """
    import sensor
    if not Camera.isOn:
        # Cold start: bring the sensor up with the configured mode/frame size.
        sensor.reset()
        sensor.set_vflip(True)
        sensor.set_hmirror(True)
        sensor.set_pixformat(Camera.mode)
        sensor.set_framesize(Camera.frame)
        sensor.skip_frames(10)
    pic = sensor.snapshot()
    sensor.shutdown(shutdown)
    Camera.isOn = not shutdown
    return pic
def init():  # overall initialization routine
    """Initialize the LCD, the camera and the UART1 pin mapping."""
    lcd.init()
    # Reset and initialize the sensor.
    sensor.reset(freq=28000000, set_regs=True, dual_buff=True)
    sensor.set_pixformat(sensor.RGB565)  # Set pixel format to RGB565 (or GRAYSCALE)
    sensor.set_framesize(sensor.QVGA)  # Set frame size to QVGA (320x240)
    sensor.skip_frames(time=2000)  # Wait for settings take effect.
    sensor.set_vflip(True)
    sensor.run(1)
    # Route UART1 TX/RX to the board pins.
    fm.register(board_info.PIN15, fm.fpioa.UART1_TX, force=True)
    fm.register(board_info.PIN17, fm.fpioa.UART1_RX, force=True)
def __init__(self):
    """Initialize the LED to show state and setup the camera sensor"""
    self._red_led = pyb.LED(1)  # Turns led on (red color)
    self._red_led.on()
    # Setup sensor settings
    # https://docs.openmv.io/library/omv.sensor.html#constants
    sensor.reset()
    sensor.set_vflip(True)  # Reverse image on vertical axis
    sensor.set_hmirror(True)  # Reverse image on horizontal axis
    sensor.set_pixformat(sensor.RGB565)
    sensor.set_framesize(sensor.QVGA)
    # Both must be turned off for color tracking.
    sensor.set_auto_gain(False)
    sensor.set_auto_whitebal(False)
def main(anchors, labels = None, model_addr="/sd/m.kmodel", sensor_window=(224, 224), lcd_rotation=0, sensor_hmirror=False, sensor_vflip=False):
    """Run a YOLO2 object-detection kmodel and draw labelled boxes on the LCD.

    anchors: YOLO2 anchor tuple for the model.
    labels: list of class-name strings; when falsy they are loaded from
            'labels.txt', a python snippet that assigns `labels = [...]`.
    Returns 1 if no labels could be obtained; otherwise loops until an
    exception is raised.  The KPU task is always released.
    """
    sensor.reset()
    sensor.set_pixformat(sensor.RGB565)
    sensor.set_framesize(sensor.QVGA)
    sensor.set_windowing(sensor_window)
    sensor.set_hmirror(sensor_hmirror)
    sensor.set_vflip(sensor_vflip)
    sensor.run(1)
    lcd.init(type=1)
    lcd.rotation(lcd_rotation)
    lcd.clear(lcd.WHITE)
    if not labels:
        # BUG FIX: `exec(f.read())` inside a function cannot rebind the local
        # `labels`, so the original code never picked up the loaded labels.
        # Execute the snippet in an explicit namespace and read the binding
        # back out.  NOTE: exec of SD-card content is inherently trusting the
        # card's contents — kept for compatibility with existing labels.txt.
        ns = {}
        with open('labels.txt','r') as f:
            exec(f.read(), ns)
        labels = ns.get('labels')
    if not labels:
        print("no labels.txt")
        img = image.Image(size=(320, 240))
        img.draw_string(90, 110, "no labels.txt", color=(255, 0, 0), scale=2)
        lcd.display(img)
        return 1
    try:
        img = image.Image("startup.jpg")
        lcd.display(img)
    except Exception:
        # No startup picture on the filesystem; show a plain loading screen.
        img = image.Image(size=(320, 240))
        img.draw_string(90, 110, "loading model...", color=(255, 255, 255), scale=2)
        lcd.display(img)
    task = kpu.load(model_addr)
    kpu.init_yolo2(task, 0.5, 0.3, 5, anchors)  # threshold:[0,1], nms_value: [0, 1]
    try:
        while 1:
            img = sensor.snapshot()
            t = time.ticks_ms()
            objects = kpu.run_yolo2(task, img)
            t = time.ticks_ms() - t
            if objects:
                for obj in objects:
                    pos = obj.rect()
                    img.draw_rectangle(pos)
                    img.draw_string(pos[0], pos[1], "%s : %.2f" %(labels[obj.classid()], obj.value()), scale=2, color=(255, 0, 0))
            img.draw_string(0, 200, "t:%dms" %(t), scale=2, color=(255, 0, 0))
            lcd.display(img)
    except Exception as e:
        raise e
    finally:
        kpu.deinit(task)
def initialize_camera():
    """Initialize the camera, retrying sensor.reset() which can fail transiently.

    Retries indefinitely; after 20 consecutive failures an error message is
    drawn on the LCD (once), and retrying continues until reset succeeds.
    """
    err_counter = 0
    while 1:
        try:
            sensor.reset()  # Reset sensor may failed, let's try some times
            break
        # BUG FIX: was a bare `except:`, which also swallowed KeyboardInterrupt
        # and SystemExit, making the retry loop impossible to break out of.
        except Exception:
            err_counter += 1
            if err_counter == 20:
                lcd.draw_string(lcd.width()//2-100, lcd.height()//2-4,
                                "Error: Sensor Init Failed", lcd.WHITE, lcd.RED)
            time.sleep(0.1)
            continue
    sensor.set_pixformat(sensor.RGB565)
    sensor.set_framesize(sensor.QVGA)  # QVGA=320x240
    sensor.set_vflip(True)
    sensor.run(1)
def fps_display():
    """Continuously display the camera feed with an FPS counter overlay."""
    clock = time.clock()
    lcd.init()
    sensor.reset()
    sensor.set_pixformat(sensor.RGB565)
    sensor.set_framesize(sensor.QVGA)
    sensor.set_vflip(1)
    sensor.run(1)
    sensor.skip_frames(30)
    while True:
        clock.tick()
        frame = sensor.snapshot()
        rate = clock.fps()
        frame.draw_string(2, 2, ("%2.1ffps" % rate), color=(0, 128, 0), scale=2)
        lcd.display(frame)
def init(self, gain_db=0, shutter_us=500000, framesize=sensor.WQXGA2, force_reset=True, flip=False):
    # (Re)configure the camera for capture.  Hardware is only touched when a
    # setting changed, an error was recorded, or force_reset is set.
    #   gain_db    : fixed sensor gain in dB; negative -> auto gain
    #   shutter_us : exposure time in microseconds; negative -> auto exposure
    #   framesize  : sensor framesize constant
    #   force_reset: re-init the sensor even if settings are unchanged
    #   flip       : rotate image 180 degrees (vflip + hmirror)
    if self.simulate:
        # Simulation mode: just record the requested settings, no hardware.
        self.shutter = shutter_us
        self.gain = gain_db
        self.snap_started = False
        return
    if force_reset or self.has_error or self.gain != gain_db or self.shutter != shutter_us or self.framesize != framesize or self.flip != flip:
        sensor.reset()
        sensor.set_pixformat(self.pixfmt)
        sensor.set_framesize(framesize)
        if flip:  # upside down camera
            sensor.set_vflip(True)
            sensor.set_hmirror(True)
        self.flip = flip
        self.framesize = framesize
        if shutter_us < 0:
            sensor.set_auto_exposure(True)
        else:
            # Long exposures need a slower sensor PLL clock; thresholds and
            # register values are chip-specific direct register writes.
            if shutter_us > 500000:
                sensor.__write_reg(0x3037, 0x08)  # slow down PLL
                if shutter_us > 1000000:
                    pyb.delay(100)
                    sensor.__write_reg(0x3037, 0x18)  # slow down PLL
                    if shutter_us > 1500000:
                        pyb.delay(100)
                        sensor.__write_reg(0x3036, 80)  # slow down PLL
                        # warning: doesn't work well, might crash
            # Let the PLL settle before programming the manual exposure.
            pyb.delay(200)
            sensor.set_auto_exposure(False, shutter_us)
        self.shutter = shutter_us
        if gain_db < 0:
            sensor.set_auto_gain(True)
        else:
            sensor.set_auto_gain(False, gain_db)
        self.gain = gain_db
        # Number of frames to discard before the first valid snapshot.
        self.wait_init = 2
        self.width = sensor.width()
        self.height = sensor.height()
def find_face():
    """Run flash-resident YOLO2 face detection and draw boxes on the LCD.

    Loops forever; the KPU task is released if the loop exits via exception.
    """
    lcd.init()
    sensor.reset()
    sensor.set_pixformat(sensor.RGB565)
    sensor.set_framesize(sensor.QVGA)
    sensor.set_vflip(1)
    sensor.run(1)
    task = kpu.load(0x300000)
    anchor = (1.889, 2.5245, 2.9465, 3.94056, 3.99987,
              5.3658, 5.155437, 6.92275, 6.718375, 9.01025)
    kpu.init_yolo2(task, 0.5, 0.3, 5, anchor)
    # BUG FIX: kpu.deinit() used to sit after the infinite loop and was
    # unreachable, leaking the KPU task on any exception.  try/finally makes
    # the cleanup actually run (matches the other demos in this file).
    try:
        while (True):
            img = sensor.snapshot()
            code = kpu.run_yolo2(task, img)
            if code:
                for i in code:
                    img.draw_rectangle(i.rect())
            lcd.display(img)
    finally:
        kpu.deinit(task)
def __init__(self):
    """Load the face detect/landmark/feature models and configure the camera."""
    self._m_fd = kpu.load(0x200000)  # face detection model
    self._m_ld = kpu.load(0x300000)  # landmark model
    self._m_fe = kpu.load(0x400000)  # feature extraction model
    self._anchor = (1.889, 2.5245, 2.9465, 3.94056, 3.99987,
                    5.3658, 5.155437, 6.92275, 6.718375, 9.01025)
    # Target positions of the five facial key points in the 128x128 crop.
    self._dst_point = [(44, 59), (84, 59), (64, 82), (47, 105), (81, 105)]
    self.names = []
    self.features = []
    _ = kpu.init_yolo2(self._m_fd, 0.5, 0.3, 5, self._anchor)
    # Reusable 128x128 buffer for the aligned face crop.
    self.img_face = image.Image(size=(128, 128))
    _ = self.img_face.pix_to_ai()
    sensor.reset()
    sensor.set_pixformat(sensor.RGB565)
    sensor.set_framesize(sensor.QVGA)
    # sensor.set_hmirror(1)
    sensor.set_vflip(1)
    self.show_img_timeout = 5
    self._show_img_t = -5
def initialize(uart_timeout):
    """Initialize the camera and lcd.

    Also maps UART1 TX and opens the port with the given timeout.
    Returns (uart, start_time).
    """
    lcd.init(freq=15000000)
    sensor.reset(freq=20000000, set_regs=True, dual_buff=True)
    sensor.set_pixformat(sensor.RGB565)
    sensor.set_framesize(sensor.QVGA)
    sensor.set_hmirror(False)
    sensor.set_vflip(False)
    sensor.skip_frames(time=2000)
    fm.register(board_info.PIN15, fm.fpioa.UART1_TX)
    port = UART(UART.UART1, 115200, 8, 0, 0,
                timeout=uart_timeout, read_buf_len=4096)
    started_at = time()
    return port, started_at
def main(model_addr=0x300000, lcd_rotation=0, sensor_hmirror=False, sensor_vflip=False):
    """Run YOLO2 face detection with explicit hardware-failure diagnostics.

    Raises a descriptive Exception when the sensor cannot be reset; the KPU
    task is released whenever it was successfully loaded.
    """
    try:
        sensor.reset()
    except Exception as e:
        raise Exception(
            "sensor reset fail, please check hardware connection, or hardware damaged! err: {}"
            .format(e))
    sensor.set_pixformat(sensor.RGB565)
    sensor.set_framesize(sensor.QVGA)
    sensor.set_hmirror(sensor_hmirror)
    sensor.set_vflip(sensor_vflip)
    sensor.run(1)
    lcd.init(type=1)
    lcd.rotation(lcd_rotation)
    lcd.clear(lcd.WHITE)
    anchors = (1.889, 2.5245, 2.9465, 3.94056, 3.99987,
               5.3658, 5.155437, 6.92275, 6.718375, 9.01025)
    task = None
    try:
        task = kpu.load(model_addr)
        kpu.init_yolo2(task, 0.5, 0.3, 5, anchors)  # threshold:[0,1], nms_value: [0, 1]
        while True:
            frame = sensor.snapshot()
            start = time.ticks_ms()
            detections = kpu.run_yolo2(task, frame)
            elapsed = time.ticks_ms() - start
            if detections:
                for det in detections:
                    frame.draw_rectangle(det.rect())
            frame.draw_string(0, 200, "t:%dms" % (elapsed), scale=2)
            lcd.display(frame)
    except Exception as e:
        raise e
    finally:
        if task is not None:
            kpu.deinit(task)
def shoot(shutdown = False):
    """Capture one frame, apply the configured post-processing, and return it.

    Skips re-initialization when the camera is already on (Camera.isOn);
    optionally powers the sensor down afterwards.
    """
    import sensor
    if not Camera.isOn:
        # Cold start: bring the sensor up with the configured mode/frame size.
        sensor.reset()
        sensor.set_vflip(True)
        sensor.set_hmirror(True)
        sensor.set_pixformat(Camera.mode)
        sensor.set_framesize(Camera.frame)
        sensor.skip_frames(10)
    pic = sensor.snapshot()
    # Tone adjustment, then optional filters and face detection overlay.
    pic.gamma_corr(1, Camera.contrast, Camera.brightness)
    if Camera.filter1 > 0:
        postProcess.applyFilter(Camera.filter1, pic)
    if Camera.filter2 > 0:
        postProcess.applyFilter(Camera.filter2, pic)
    if Camera.postproc == 1:
        postProcess.faceDetect(pic)
    sensor.shutdown(shutdown)
    Camera.isOn = not shutdown
    return pic
def main(labels = None, model_addr="/sd/m.kmodel", lcd_rotation=0, sensor_hmirror=False, sensor_vflip=False):
    """Run an image-classification kmodel and overlay the top-1 class on the LCD.

    Raises Exception when no labels are supplied.  Loops until an exception,
    which is printed; the KPU task is always released.
    """
    gc.collect()
    # Camera setup, cropped to the model's 224x224 input.
    sensor.reset()
    sensor.set_pixformat(sensor.RGB565)
    sensor.set_framesize(sensor.QVGA)
    sensor.set_windowing((224, 224))
    sensor.set_hmirror(sensor_hmirror)
    sensor.set_vflip(sensor_vflip)
    sensor.run(1)
    # Display setup.
    lcd.init(type=1)
    lcd.rotation(lcd_rotation)
    lcd.clear(lcd.WHITE)
    if not labels:
        raise Exception("no labels.txt")
    task = kpu.load(model_addr)
    try:
        while True:
            frame = sensor.snapshot()
            start = time.ticks_ms()
            fmap = kpu.forward(task, frame)
            elapsed = time.ticks_ms() - start
            scores = fmap[:]
            best = max(scores)
            best_index = scores.index(best)
            frame.draw_string(0, 0, "%.2f\n%s" % (best, labels[best_index].strip()),
                              scale=2, color=(255, 0, 0))
            frame.draw_string(0, 200, "t:%dms" % (elapsed), scale=2, color=(255, 0, 0))
            lcd.display(frame)
    except Exception as e:
        sys.print_exception(e)
    finally:
        kpu.deinit(task)
import sensor
import image
import lcd
import time

# QR-code scanner demo: show the camera feed and overlay any decoded payload.
clock = time.clock()
# lcd.direction(lcd.YX_RLUD)
lcd.init()
sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
# Camera is mounted rotated 180 degrees.
sensor.set_vflip(1)
sensor.set_hmirror(1)
sensor.run(1)
sensor.skip_frames(30)
# sensor.set_brightness(17)
while True:
    clock.tick()
    img = sensor.snapshot()
    res = img.find_qrcodes()
    fps = clock.fps()
    if len(res) > 0:
        # img.draw_string(2,2, res[0].payload(), color=(0,128,0), scale=2)
        box = res[0].rect()
        img.draw_string(40, 20, res[0].payload(), (236, 36, 36), scale=1.5)
        img.draw_rectangle(box, (236, 36, 36))
        print(res[0].payload())
    lcd.display(img)
key_pressed = 1 else: key_pressed = 0 last_key_state = val fm.register(7, fm.fpioa.UART1_RX, force=True) #配置uart 7接收 uart = UART(UART.UART1, 115200, 8, None, 1, timeout=1000, read_buf_len=4096) #配置uart lcd.init() # 初始化lcd lcd.rotation(2) sensor.reset() #初始化sensor 摄像头 sensor.set_pixformat(sensor.RGB565) sensor.set_framesize(sensor.QVGA) sensor.set_hmirror(0) #设置摄像头镜像 sensor.set_vflip(0) #设置摄像头翻转 sensor.run(1) #使能摄像头 anchor = (1.889, 2.5245, 2.9465, 3.94056, 3.99987, 5.3658, 5.155437, 6.92275, 6.718375, 9.01025) #anchor for face detect 用于人脸检测的Anchor dst_point = [ (44, 59), (84, 59), (64, 82), (47, 105), (81, 105) ] #standard face key point position 标准正脸的5关键点坐标 分别为 左眼 右眼 鼻子 左嘴角 右嘴角 a = kpu.init_yolo2(task_fd, 0.5, 0.3, 5, anchor) #初始化人脸检测模型 img_lcd = image.Image() # 设置显示buf img_face = image.Image(size=(128, 128)) #设置 128 * 128 人脸图片buf a = img_face.pix_to_ai() # 将图片转为kpu接受的格式 record_ftr = [] #空列表 用于存储当前196维特征 record_ftrs = [] #空列表 用于存储按键记录下人脸特征, 可以将特征以txt等文件形式保存到sd卡后,读取到此列表,即可实现人脸断电存储。 names = [] # 人名标签,与上面列表特征值一一对应。 with open("/sd/recordftr3.txt", "r") as f: while (1):
【margin】调整合并色块的边缘。 对于 RGB565 图像,每个元组需要有六个值(l_lo,l_hi,a_lo,a_hi,b_lo,b_hi) 分别是 LAB中 L,A 和 B 通道的最小值和最大值。 L的取值范围为0-100,a/b 的取值范围为-128到127。 ''' import sensor import image import lcd import time lcd.init() sensor.reset(freq=24000000, set_regs=True, dual_buff=True) sensor.set_pixformat(sensor.RGB565) sensor.set_framesize(sensor.QVGA) sensor.set_vflip(1) #设置摄像头后置 sensor.run(1) #红色阈值[0],绿色阈值[1],蓝色阈值[2] rgb_thresholds = [(30, 100, 15, 127, 15, 127), (0, 80, -70, -10, -0, 30), (0, 30, 0, 64, -128, -20)] while True: img = sensor.snapshot() blobs = img.find_blobs([rgb_thresholds[0]]) a = [0, 0, 0, 0, 0, 0, 0, 0] if blobs: for b in blobs: a[7] = b.area() if a[7] > a[6]: a[6] = a[7] a[0:4] = b.rect()
import image import time import ustruct as struct from pyb import UART # 红色小球的LAB色彩空间阈值 (L Min, L Max, A Min, A Max, B Min, B Max) RED_BALL_THRESHOLD = (57, 74, 38, 85, -21, 62) # 串口初始化 uart = UART(3, 115200) # OpenMV感光芯片初始化 sensor.reset() # 重置感芯片 sensor.set_pixformat(sensor.RGB565) # 设置像素格式为RGB565 sensor.set_framesize(sensor.QVGA) # 设置分辨率为QVGA (340 * 240) sensor.set_vflip(True) sensor.skip_frames(time=2000) # 跳过2s内的帧, 等待画质稳定 sensor.set_auto_gain(False) # 关闭自动增益 sensor.set_auto_whitebal(False) # 关闭自动白平衡 # 初始化时钟 clock = time.clock() while (True): clock.tick() # 开始计时 img = sensor.snapshot() # 拍摄一张照片 # 获取画面中的色块 blobs = img.find_blobs([RED_BALL_THRESHOLD], pixels_threshold=100, area_threshold=100, merge=True)
#connect to arduino uart = UART(3) uart.init(38400, bits=8, parity=None, stop=1) # Reset sensor sensor.reset() # Sensor settings sensor.set_contrast(1) sensor.set_gainceiling(16) # HQVGA and GRAYSCALE are the best for face tracking. sensor.set_framesize(sensor.HQVGA) sensor.set_pixformat(sensor.GRAYSCALE) sensor.set_vflip(True) # Load Haar Cascade # By default this will use all stages, lower satges is faster but less accurate. face_cascade = image.HaarCascade("frontalface", stages=25) # FPS clock clock = time.clock() while (True): clock.tick() # Capture snapshot img = sensor.snapshot() # Find faces.