def run(argv):
    """Configure the camera and dispatch to the detector selected by argv.

    mode 0 -> grayscale QQVGA setup, then findArrow();
    any other value -> RGB565 VGA cropped to 240x240, then checkSignal().
    findArrow/checkSignal are defined elsewhere in this module.
    """
    mode = argv
    if (mode == 0):
        # check Arrow
        sensor.reset()
        '''sensor.set_auto_gain(False)
        sensor.set_contrast(1)
        sensor.set_gainceiling(16)
        #sensor.set_windowing((200, 200)) # 240x240 center pixels of VGA
        sensor.set_framesize(sensor.QQVGA)
        sensor.set_pixformat(sensor.GRAYSCALE)
        sensor.set_auto_whitebal(False)
        '''
        sensor.set_pixformat(sensor.GRAYSCALE)
        sensor.set_framesize(sensor.QQVGA)
        sensor.set_vflip(True)    # flip/mirror — presumably the camera is mounted inverted; TODO confirm
        sensor.set_hmirror(True)
        sensor.skip_frames(time=2000)  # let the new settings settle
        findArrow()
    else:
        # check signal mode
        sensor.reset()
        sensor.set_auto_gain(False)
        sensor.set_auto_whitebal(True)
        sensor.set_contrast(-3)
        sensor.set_brightness(-3)
        sensor.set_gainceiling(8)
        sensor.set_pixformat(sensor.RGB565)
        sensor.set_vflip(True)
        sensor.set_framesize(sensor.VGA)
        sensor.set_windowing((240, 240))  # 240x240 center pixels of VGA
        #sensor.set_windowing((200, 200)) # 200x200 center pixels of VGA
        sensor.skip_frames(time=800)
        checkSignal()
def init(is_debug, pixformat, delay_time):
    """Initialise the camera (RGB565 QVGA) and both UARTs; set CompetitionScene.

    is_debug True -> CompetitionScene = 0 (debug), else 1.
    delay_time: ms to wait for sensor settings to settle.
    NOTE(review): the `pixformat` parameter is accepted but never used here —
    the format is hard-coded to RGB565; confirm whether that is intended.
    """
    # Close the UARTs first so initialisation traffic cannot overflow them
    uart.deinit()
    uart2.deinit()
    sensor.reset()
    sensor.set_pixformat(sensor.RGB565)  # RGB565
    sensor.set_framesize(sensor.QVGA)  # 320*240
    sensor.set_gainceiling(128)  # gain ceiling: 2, 4, 8, 16, 32, 64, 128
    sensor.set_contrast(3)  # contrast, -3 to +3
    sensor.set_brightness(0)  # brightness, -3 to +3
    sensor.set_saturation(3)  # saturation, -3 to +3
    sensor.set_auto_exposure(True)  # auto exposure
    sensor.skip_frames(time=delay_time)
    sensor.set_auto_gain(False)  # must be off for color tracking
    sensor.set_auto_whitebal(False)  # must be off for color tracking
    # Re-open the UARTs
    uart.init(115200, timeout_char=1000)
    uart2.init(115200, timeout_char=1000)
    # Record debug vs. competition mode in the module-level flag
    global CompetitionScene
    if is_debug == True:
        CompetitionScene = 0
    else:
        CompetitionScene = 1
def CorrTest(loopCnt = 220, barLen=120): sensor.reset() # Sensor settings sensor.set_contrast(1) sensor.set_gainceiling(16) sensor.set_framesize(sensor.QVGA) sensor.set_pixformat(sensor.RGB565) #sensor.set_windowing((480,272)) clock = time.clock() avg = 0.0 startTick = time.ticks() corr = 0.3 while (True): if time.ticks() - startTick > loopCnt: break clock.tick() img = sensor.snapshot() for i in range(7): img.draw_rectangle(160-i*15, 120-i*15, i*15*2, i*15*2) corr += 0.05 if corr >= 4.0: corr = 0.3 img.lens_corr(corr) lnLen = (barLen * (loopCnt - (time.ticks() - startTick))) // loopCnt DrawPgsBar(img, barLen, loopCnt, startTick) img.draw_string(4,4,'Lens correction %.2f' % (corr), color=(0,0,0))
def face_detect():
    """Look for a face for up to 60 frames; seed the tracking filters.

    Appends detected centers/heights to the module-level cx_array, cy_array,
    h_array and returns True once filter_stages samples are collected;
    returns False if 60 frames pass without enough detections.
    Relies on module globals: face_cascade, find_max, LED, cx_array,
    cy_array, h_array, filter_stages.
    """
    print("Detecting face...")
    sensor.reset()
    sensor.set_contrast(1)
    sensor.set_gainceiling(16)
    sensor.set_framesize(sensor.HQVGA)
    sensor.set_pixformat(sensor.GRAYSCALE)
    clock = time.clock()
    LED.on()
    for i in range(60):
        img = sensor.snapshot()
        objects = img.find_features(face_cascade, threshold=0.75, scale_factor=1.25)
        # Draw objects
        if objects:
            for r in objects:
                img.draw_rectangle(r)
                cx=r[0]+r[2]//2
                cy=r[1]+r[3]//2
                img.draw_cross(cx,cy)
            # Track only the largest detection
            obj = find_max(objects)
            cx_array.append(obj[0]+obj[2]//2)
            cy_array.append(obj[1]+obj[3]//2)
            h_array.append(obj[2])
            # NOTE(review): this assigns a local, not the module-level
            # h_threshold (no `global` statement) — confirm intent.
            h_threshold = obj[2]
            if len(cx_array) == filter_stages:
                LED.off()
                print('Face detected.\nStart tracking...')
                return True
    LED.off()
    print('No face detected.\nTrying color mode...')
    return False
def FaceTest(): sensor.reset() # Sensor settings sensor.set_contrast(1) sensor.set_gainceiling(16) # HQVGA and GRAYSCALE are the best for face tracking. sensor.set_framesize(sensor.VGA) sensor.set_windowing((320, 240)) #sensor.set_framesize(sensor.QVGA) sensor.set_pixformat(sensor.GRAYSCALE) #sensor.set_framerate(2<<9|3<<11) # Load Haar Cascade # By default this will use all stages, lower satges is faster but less accurate. face_cascade = image.HaarCascade("frontalface", stages=25) print(face_cascade) clock = time.clock() for i in range(250): clock.tick() img = sensor.snapshot() objects = img.find_features(face_cascade, threshold=0.75, scale_factor=1.25) fID = 0 for r in objects: img.draw_rectangle(r, color=(0, 0, 0), thickness=3) #img.draw_rectangle(r[0], r[1], 48, 10, fill=True, color=(0,0,0)) fID += 1 s = 'face %d' % (fID) img.draw_string(r[0], r[1], s) print(clock.fps())
def facetrack_camInit(): print("INIT Facetrack") # Reset sensor sensor.reset() sensor.set_contrast(3) sensor.set_gainceiling(16) sensor.set_framesize(sensor.VGA) sensor.set_windowing((200, 200)) sensor.set_pixformat(sensor.GRAYSCALE) sensor.skip_frames(time = 2000)
def face_recog(calc_time, vi_ip):
    """Detect a face within calc_time ms, save a snapshot, and hand it to
    facial_recog (defined elsewhere) together with vi_ip.

    The outer while loop re-initialises the camera and retries until the
    time budget (tracked from pyb.millis()) is exhausted; expiry inside the
    detection loop raises to the local handler, which just cleans up.
    """
    pin = pyb.millis()  # start-of-budget timestamp
    print(pin)
    print(calc_time)
    cc = 0  # NOTE(review): unused
    #pyb.elapsed_millis(start)
    while pyb.elapsed_millis(pin) < calc_time:
        print("top of face recog function")
        # snapshot on face detection
        RED_LED_PIN = 1
        BLUE_LED_PIN = 3
        sensor.reset()  # Initialize the camera sensor.
        sensor.set_contrast(3)
        sensor.set_gainceiling(16)
        sensor.set_pixformat(sensor.GRAYSCALE)
        sensor.set_framesize(sensor.HQVGA)  # or sensor.QQVGA (or others)
        #sensor.alloc_extra_fb()
        sensor.skip_frames(time=2000)  # Let new settings take effect.
        face_cascade = image.HaarCascade("frontalface", stages=25)
        uos.chdir("/")
        pyb.LED(RED_LED_PIN).on()
        print("About to start detecting faces...")
        sensor.skip_frames(time=2000)  # Give the user time to get ready.
        pyb.LED(RED_LED_PIN).off()
        print("Now detecting faces!")
        pyb.LED(BLUE_LED_PIN).on()
        diff = 10  # We'll say we detected a face after 10 frames.
        try:
            while (diff):
                img = sensor.snapshot()
                sensor.alloc_extra_fb(img.width(), img.height(), sensor.GRAYSCALE)
                faces = img.find_features(face_cascade, threshold=0.5, scale_factor=1.5)
                sensor.dealloc_extra_fb()
                if faces:
                    diff -= 1
                    for r in faces:
                        img.draw_rectangle(r)
                elif (pyb.elapsed_millis(pin)) > calc_time:
                    raise Exception  # budget exhausted — bail to handler
            pyb.LED(BLUE_LED_PIN).off()
            print("Face detected! Saving image...")
            pic_name = "snapshot-person.pgm"
            sensor.snapshot().save( pic_name)  # Save Pic. to root of SD card -- uos.chdir("/")
            pyb.delay(100)
            facial_recog(pic_name, vi_ip)
            gc.collect()
        except Exception as go:
            print("we are in exception")
            pyb.LED(BLUE_LED_PIN).off()
            gc.collect()
def face_detect(init_start, calc_time):
    """Detect a face before the time budget expires and save a snapshot.

    init_start: pyb.millis() timestamp when the budget started.
    calc_time: total budget in ms.
    Returns the saved picture filename on success; falls through (returning
    None) after the internal exception handler when time runs out.
    """
    print("~~~~~~~~~~~~~~~~FACE_DETECT~~~~~~~~~~~~~~~~~~~~~~")
    gc.collect()  # garbage collection
    while pyb.elapsed_millis(init_start) < calc_time:  # while time not expired
        # snapshot on face detection
        RED_LED_PIN = 1
        BLUE_LED_PIN = 3
        sensor.reset()  # Initialize the camera sensor.
        sensor.set_contrast(3)  # set to highest contrast setting
        sensor.set_gainceiling(16)
        sensor.set_pixformat(
            sensor.GRAYSCALE)  # grayscale for facial recognition
        sensor.set_framesize(sensor.HQVGA)
        sensor.skip_frames(time=2000)  # Let new settings take effect.
        face_cascade = image.HaarCascade(
            "frontalface", stages=25)  # frontal-face Haar cascade classifier
        uos.chdir("/")
        pyb.LED(RED_LED_PIN).on()
        print("About to start detecting faces...")
        sensor.skip_frames(time=2000)  # Give the user time to get ready.
        pyb.LED(RED_LED_PIN).off()
        print("Now detecting faces!")
        pyb.LED(BLUE_LED_PIN).on()
        diff = 10  # We'll say we detected a face after 10 frames.
        try:
            while (diff):
                img = sensor.snapshot()
                sensor.alloc_extra_fb(
                    img.width(), img.height(),
                    sensor.GRAYSCALE)  # allocate more space for image
                faces = img.find_features(
                    face_cascade, threshold=0.5,
                    scale_factor=1.5)  # detecting face features
                sensor.dealloc_extra_fb()
                if faces:
                    diff -= 1
                    for r in faces:
                        img.draw_rectangle(r)
                elif (pyb.elapsed_millis(init_start)
                      ) > calc_time:  # if time is expired, leave via handler
                    raise Exception
            pyb.LED(BLUE_LED_PIN).off()
            print("Face detected! Saving image...")
            pic_name = "snapshot-person.pgm"
            sensor.snapshot().save(pic_name)  # Save Pic. to root of SD card
            pyb.delay(100)
            gc.collect()  # garbage collection
            return pic_name
        except Exception as go:
            print("exception - time expired")
            pyb.LED(BLUE_LED_PIN).off()
            gc.collect()  # garbage collection
def CIFAR10Test(loopCnt=600, isFull=False, barLen=105):
    """CIFAR-10 classifier demo.

    loopCnt is compared against elapsed time.ticks(), so it is a duration
    (ms) despite the name — NOTE(review): confirm.
    isFull selects /cifar10.network over the faster /cifar10_fast.network.
    barLen: progress-bar width passed to DrawPgsBar (defined elsewhere).
    """
    pyb.LED(1).off()
    sensor.reset()  # Reset and initialize the sensor.
    sensor.set_contrast(3)
    sensor.set_pixformat(
        sensor.RGB565)  # Set pixel format to RGB565 (or GRAYSCALE)
    sensor.set_framesize(sensor.VGA)
    sensor.set_windowing((192, 192))  # Set window
    sensor.skip_frames(time=300)  # Wait for settings take effect.
    sensor.set_auto_gain(False)
    #sensor.set_framerate(0<<9|1<<12)
    if isFull:
        net = nn.load('/cifar10.network')
    else:
        net = nn.load('/cifar10_fast.network')
    labels = [
        'plane', 'auto', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse',
        'ship', 'truck'
    ]
    clock = time.clock()
    tAvg = 0.0  # exponential moving average of per-frame search time (ms)
    startTick = time.ticks()
    while (True):
        if time.ticks() - startTick > loopCnt:
            break
        clock.tick()
        img = sensor.snapshot()
        t0 = time.ticks()
        lst = net.search(img, threshold=0.640, min_scale=1, scale_mul=0.8, \
                x_overlap=-1, y_overlap=-1, contrast_threshold=0.5)
        t1 = time.ticks() - t0
        tAvg = tAvg * 0.9 + t1 * 0.1
        img.draw_string(
            4, 8,
            'CIFAR-10: classify:\nplane,auto,cat,dog,\ndeer,horse,frog,ship,\ntruck,horse',
            color=(0, 0, 0))
        lnLen = (barLen * (loopCnt - (time.ticks() - startTick))) // loopCnt  # NOTE(review): unused; DrawPgsBar draws the bar
        DrawPgsBar(img, barLen, loopCnt, startTick)
        for obj in lst:
            print(' %s - Confidence %f%%' % (labels[obj.index()], obj.value()))
            rc = obj.rect()
            #img.draw_rectangle(rc, color=(255,255,255))
            # Black label background next to the progress bar
            img.draw_rectangle(barLen + 10, 1, 50, 8, fill=True, color=(0, 0, 0))
            img.draw_string(barLen + 10, 0, labels[obj.index()])
    print('algo time cost : %.2f ms' % (tAvg))
def test_color_bars():
    """Sensor self-test using the built-in color-bar pattern.

    Enables colorbar mode, samples the middle row of each of the 8 bars
    (averaging 10 pixels per bar), and checks each average against the
    expected RGB class for that bar. Raises Exception on the first
    mismatching bar; prints a pass message otherwise.
    """
    sensor.reset()
    # Set sensor settings
    sensor.set_brightness(0)
    sensor.set_saturation(3)
    sensor.set_gainceiling(8)
    sensor.set_contrast(2)
    # Set sensor pixel format
    sensor.set_framesize(sensor.QVGA)
    sensor.set_pixformat(sensor.RGB565)
    # Enable colorbar test mode
    sensor.set_colorbar(True)
    # Skip a few frames to let the sensor settle down.
    # (renamed loop variable from `image` to `img` so the OpenMV `image`
    # module is not shadowed)
    for i in range(0, 100):
        img = sensor.snapshot()
    # Color-bar thresholds: one classifier per bar, left to right
    t = [
        lambda r, g, b: r < 70 and g < 70 and b < 70,  # Black
        lambda r, g, b: r < 70 and g < 70 and b > 200,  # Blue
        lambda r, g, b: r > 200 and g < 70 and b < 70,  # Red
        lambda r, g, b: r > 200 and g < 70 and b > 200,  # Purple
        lambda r, g, b: r < 70 and g > 200 and b < 70,  # Green
        lambda r, g, b: r < 70 and g > 200 and b > 200,  # Aqua
        lambda r, g, b: r > 200 and g > 200 and b < 70,  # Yellow
        lambda r, g, b: r > 200 and g > 200 and b > 200,  # White
    ]
    # Color bars are inverted for OV7725
    if (sensor.get_id() == sensor.OV7725):
        t = t[::-1]
    # 320x240 image with 8 color bars, each approx 40 pixels wide.
    # Start from the center of each bar and average the values of
    # 10 sample pixels on the middle row (y=120).
    for i in range(0, 8):
        avg = (0, 0, 0)
        idx = 40 * i + 20  # center of colorbars
        for off in range(0, 10):  # avg 10 pixels
            rgb = img.get_pixel(idx + off, 120)
            avg = tuple(map(sum, zip(avg, rgb)))
        if not t[i](avg[0] / 10, avg[1] / 10, avg[2] / 10):
            # (added the missing space before BAR# so the message matches
            # the other copy of this test in the project)
            raise Exception('COLOR BARS TEST FAILED. '
                            'BAR#(%d): RGB(%d,%d,%d)' %
                            (i + 1, avg[0] / 10, avg[1] / 10, avg[2] / 10))
    print('COLOR BARS TEST PASSED...')
def init(self, robot_):
    """Select per-robot LAB thresholds/window and lock down sensor settings.

    robot_: one of self.ROBOT_O / self.ROBOT_P2.
    NOTE(review): any other value leaves self.thresholds and self.window
    unset, so sensor.set_windowing(self.window) below would raise
    AttributeError — confirm callers only pass the two known constants.
    """
    self.robot = robot_
    if self.robot == self.ROBOT_O:  # O_bot
        self.thresholds = [
            (35, 100, 26, 78, 22, 72),  # Ball
            (68, 100, -19, 27, 41, 81),  # Yellow Goal
            (20, 41, -6, 15, -55, -15)
        ]  # Blue Goal
        self.window = (59, 18, 184, 181)
    elif self.robot == self.ROBOT_P2:  # P2_bot
        self.thresholds = [
            (39, 100, 26, 78, 22, 72),  # Ball
            (68, 100, -19, 27, 41, 81),  # Yellow Goal
            (20, 41, -6, 25, -55, -15)
        ]  # Blue Goal
        self.window = (71, 4, 193, 191)
    # sensor setup
    sensor.reset()
    sensor.set_pixformat(sensor.RGB565)
    sensor.set_framesize(sensor.QVGA)  # Resolution
    sensor.set_windowing(self.window)
    sensor.skip_frames(time=1000)
    sensor.set_auto_exposure(False)
    sensor.set_auto_whitebal(False)
    # Need to let the above settings get in...
    sensor.skip_frames(time=250)
    # === GAIN ===
    # Freeze the gain at whatever auto-gain converged to
    curr_gain = sensor.get_gain_db()
    sensor.set_auto_gain(False, gain_db=curr_gain)
    # === EXPOSURE ===
    # Fix exposure at half the auto-converged value
    curr_exposure = sensor.get_exposure_us()
    sensor.set_auto_exposure(False, exposure_us=int(curr_exposure * 0.5))
    # === WHITE BAL ===
    sensor.set_auto_whitebal(
        False, rgb_gain_db=(-6.02073, -3.762909, 3.33901))  # Must remain false for blob tracking
    sensor.set_brightness(2)
    sensor.set_contrast(2)
    sensor.set_saturation(2)
    sensor.skip_frames(time=500)
def face_detect(init_start, calc_time):
    """Detect a face within the time budget and save a snapshot.

    init_start: pyb.millis() timestamp when the budget started.
    calc_time: total budget in ms.
    Returns the saved picture filename on success; falls through (returning
    None) after the internal exception handler when time runs out.
    """
    print("~~~~~~~~~~~~~~~~FACE_DETECT~~~~~~~~~~~~~~~~~~~~~~")
    gc.collect()
    while pyb.elapsed_millis(init_start) < calc_time:
        RED_LED_PIN = 1
        BLUE_LED_PIN = 3
        sensor.reset()
        sensor.set_contrast(3)
        sensor.set_gainceiling(16)
        sensor.set_pixformat(sensor.GRAYSCALE)
        sensor.set_framesize(sensor.HQVGA)
        sensor.skip_frames(time=2000)  # let the new settings take effect
        face_cascade = image.HaarCascade("frontalface", stages=25)
        uos.chdir("/")
        pyb.LED(RED_LED_PIN).on()
        print("About to start detecting faces...")
        sensor.skip_frames(time=2000)  # give the user time to get ready
        pyb.LED(RED_LED_PIN).off()
        print("Now detecting faces!")
        pyb.LED(BLUE_LED_PIN).on()
        diff = 10  # declare success after faces are seen in 10 frames
        try:
            while (diff):
                img = sensor.snapshot()
                sensor.alloc_extra_fb(img.width(), img.height(),
                                      sensor.GRAYSCALE)
                faces = img.find_features(face_cascade,
                                          threshold=0.5,
                                          scale_factor=1.5)
                sensor.dealloc_extra_fb()
                if faces:
                    diff -= 1
                    for r in faces:
                        img.draw_rectangle(r)
                elif (pyb.elapsed_millis(init_start)) > calc_time:
                    raise Exception  # budget exhausted — bail to handler
            pyb.LED(BLUE_LED_PIN).off()
            print("Face detected! Saving image...")
            pic_name = "snapshot-person.pgm"
            sensor.snapshot().save(pic_name)  # save to SD card root
            pyb.delay(100)
            gc.collect()
            return pic_name
        except Exception as go:
            print("exception - time expired")
            pyb.LED(BLUE_LED_PIN).off()
            gc.collect()
def init(is_debug,delay_time):
    """Initialise camera (grayscale QVGA), UART and LCD; set CompetitionScene.

    is_debug True -> CompetitionScene = 0 (debug), else 1.
    delay_time: ms to wait for sensor settings to settle.
    """
    uart.deinit()  # close the UART so init traffic cannot overflow it
    sensor.reset()
    sensor.set_pixformat(sensor.GRAYSCALE)
    sensor.set_framesize(sensor.QVGA)
    sensor.set_contrast(3)  # contrast, -3 to +3
    sensor.set_brightness(-3)  # brightness, -3 to +3
    sensor.set_auto_exposure(True)
    sensor.skip_frames(time = delay_time)
    sensor.set_auto_whitebal(False)  # off for stable color response
    uart.init(115200,timeout_char=1000)  # re-open the UART
    lcd.init()
    global CompetitionScene
    if is_debug==True:
        CompetitionScene=0
    else:
        CompetitionScene=1
def test_color_bars():
    """Sensor self-test using the built-in color-bar pattern.

    Enables colorbar mode, samples the middle row of each of the 8 bars
    (averaging 10 pixels per bar), and checks each average against the
    expected RGB class for that bar. Raises Exception on the first
    mismatching bar; prints a pass message otherwise.
    """
    sensor.reset()
    # Set sensor settings
    sensor.set_brightness(0)
    sensor.set_saturation(0)
    sensor.set_gainceiling(8)
    sensor.set_contrast(2)
    # Set sensor pixel format
    sensor.set_framesize(sensor.QVGA)
    sensor.set_pixformat(sensor.RGB565)
    # Enable colorbar test mode
    sensor.set_colorbar(True)
    # Skip a few frames to allow the sensor to settle down.
    # Note: This takes more time when exec from the IDE.
    # (renamed loop variable from `image` to `img` so the OpenMV `image`
    # module is not shadowed)
    for i in range(0, 100):
        img = sensor.snapshot()
    # Color bars thresholds: one classifier per bar, left to right
    t = [lambda r, g, b: r < 50 and g < 50 and b < 50,  # Black
         lambda r, g, b: r < 50 and g < 50 and b > 200,  # Blue
         lambda r, g, b: r > 200 and g < 50 and b < 50,  # Red
         lambda r, g, b: r > 200 and g < 50 and b > 200,  # Purple
         lambda r, g, b: r < 50 and g > 200 and b < 50,  # Green
         lambda r, g, b: r < 50 and g > 200 and b > 200,  # Aqua
         lambda r, g, b: r > 200 and g > 200 and b < 50,  # Yellow
         lambda r, g, b: r > 200 and g > 200 and b > 200]  # White
    # 320x240 image with 8 color bars each one is approx 40 pixels.
    # We start from the center of the frame buffer, and average the
    # values of 10 sample pixels from the center of each color bar.
    for i in range(0, 8):
        avg = (0, 0, 0)
        idx = 40*i+20  # center of colorbars
        for off in range(0, 10):  # avg 10 pixels
            rgb = img.get_pixel(idx+off, 120)
            avg = tuple(map(sum, zip(avg, rgb)))
        if not t[i](avg[0]/10, avg[1]/10, avg[2]/10):
            raise Exception("COLOR BARS TEST FAILED. "
                            "BAR#(%d): RGB(%d,%d,%d)"%(i+1, avg[0]/10, avg[1]/10, avg[2]/10))
    print("COLOR BARS TEST PASSED...")
def LENETTest(loopCnt=1200, barLen=60): sensor.reset() # Reset and initialize the sensor. sensor.set_contrast(3) sensor.set_pixformat( sensor.GRAYSCALE) # Set pixel format to RGB565 (or GRAYSCALE) sensor.set_framesize(sensor.VGA) # Set frame size to QVGA (320x240) sensor.set_windowing((84, 84)) # Set 128x128 window. sensor.skip_frames(time=1400) # Wait for settings take effect. sensor.set_auto_gain(False) sensor.set_framerate(2 << 2) #sensor.set_auto_whitebal(False) #sensor.set_auto_exposure(False) net = nn.load('/lenet.network') labels = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'] clock = time.clock() avg = 0.0 pyb.LED(1).on() startTick = time.ticks() while (True): if time.ticks() - startTick > loopCnt: break clock.tick() img = sensor.snapshot() img.draw_string(3, 8, 'recg 0-9', color=(0, 0, 0)) t1 = time.ticks() tmp_img = img.copy().binary([(120, 255)], invert=True) lst = net.search(tmp_img, threshold=0.8, min_scale=1, scale_mul=0.8, \ x_overlap=-1, y_overlap=-1, contrast_threshold=0.5, softmax=False) t2 = time.ticks() - t1 avg = avg * 0.95 + t2 * 0.05 lnLen = (barLen * (loopCnt - (time.ticks() - startTick))) // loopCnt img.draw_rectangle(0, 2, barLen + 1, 3) img.draw_rectangle(0, 3, lnLen, 1, fill=True) for obj in lst: print('Detected %s - Confidence %f%%' % (labels[obj.index()], obj.value())) img.draw_rectangle(obj.rect()) img.draw_string(barLen + 8, 2, labels[obj.index()], color=(0, 0, 0)) # print(clock.fps()) print('algo time cost : %.2f ms' % (avg))
def FaceTest(loopCnt=220, barLen=120): sensor.reset() # Sensor settings sensor.set_contrast(1) #sensor.set_gainceiling(16) # HQVGA and GRAYSCALE are the best for face tracking. #sensor.set_framesize(sensor.VGA) #sensor.set_windowing((320,240)) sensor.set_framesize(sensor.VGA) sensor.set_windowing((320, 240)) sensor.set_pixformat(sensor.GRAYSCALE) #sensor.set_auto_gain(False) #sensor.set_auto_whitebal(True) # must be turned off for color tracking # Load Haar Cascade # By default this will use all stages, lower satges is faster but less accurate. face_cascade = image.HaarCascade("frontalface", stages=25) print(face_cascade) clock = time.clock() avg = 0.0 startTick = time.ticks() while (True): if time.ticks() - startTick > loopCnt: break clock.tick() img = sensor.snapshot() img.draw_string(4, 4, 'Face Detect', color=(0, 0, 0)) t0 = time.ticks() objects = img.find_features(face_cascade, threshold=0.75, scale_factor=1.25) t1 = time.ticks() - t0 avg = avg * 0.90 + t1 * 0.10 fID = 0 lnLen = (barLen * (loopCnt - (time.ticks() - startTick))) // loopCnt DrawPgsBar(img, barLen, loopCnt, startTick) for r in objects: img.draw_rectangle(r, thickness=3) img.draw_rectangle(r[0], r[1], 48, 10, fill=True) fID += 1 s = 'face %d' % (fID) img.draw_string(r[0], r[1], s, color=(0, 0, 0)) print('algo time cost : %.2f ms' % (avg))
def init(is_debug, pixformat, delay_time):
    """Initialise camera (QQVGA, "GRAY" or "RGB") and UART; set CompetitionScene.

    is_debug True -> CompetitionScene = 0 (debug), else 1.
    pixformat: "GRAY" or "RGB" — NOTE(review): any other string leaves the
    sensor's previous pixel format in place; confirm that is intended.
    delay_time: ms to wait for sensor settings to settle.
    """
    uart.deinit()  # close the UART so init traffic cannot overflow it
    sensor.reset()
    if pixformat == "GRAY":
        sensor.set_pixformat(sensor.GRAYSCALE)
    elif pixformat == "RGB":
        sensor.set_pixformat(sensor.RGB565)
    sensor.set_framesize(sensor.QQVGA)
    sensor.set_gainceiling(128)  # gain ceiling: 2, 4, 8, 16, 32, 64, 128
    sensor.set_contrast(3)  # contrast, -3 to +3
    sensor.set_brightness(0)  # brightness, -3 to +3
    sensor.set_saturation(3)  # saturation, -3 to +3
    sensor.set_auto_exposure(True)
    sensor.skip_frames(time=delay_time)
    sensor.set_auto_gain(False)  # must be off for color tracking
    sensor.set_auto_whitebal(False)  # must be off for color tracking
    uart.init(115200, timeout_char=1000)  # re-open the UART
    global CompetitionScene
    if is_debug == True:
        CompetitionScene = 0
    else:
        CompetitionScene = 1
def LENetTest(loopCnt=600, isFull=False, barLen=80): sensor.reset() # Reset and initialize the sensor. sensor.set_contrast(3) sensor.set_pixformat( sensor.GRAYSCALE) # Set pixel format to RGB565 (or GRAYSCALE) sensor.set_framesize(sensor.CIF) # Set frame size to QVGA (320x240) sensor.set_windowing((96, 96)) # Set 128x128 window. sensor.set_auto_gain(True) sensor.set_auto_whitebal(False) sensor.set_auto_exposure(False) sensor.skip_frames(time=400) # Wait for settings take effect. net = nn.load('/lenet.network') labels = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'] clock = time.clock() tAvg = 0.0 startTick = time.ticks() while (True): if time.ticks() - startTick > loopCnt: break clock.tick() img = sensor.snapshot() t0 = time.ticks() tmp_img = img.copy().binary([(120, 255)], invert=True) lst = net.search(tmp_img, threshold=0.8, min_scale=1.0, scale_mul=0.8, \ x_overlap=-1, y_overlap=-1, contrast_threshold=0.5, softmax=False) t1 = time.ticks() - t0 tAvg = tAvg * 0.9 + t1 * 0.1 img.draw_string(4, 8, 'LENET', color=(0, 0, 0)) lnLen = (barLen * (loopCnt - (time.ticks() - startTick))) // loopCnt DrawPgsBar(img, barLen, loopCnt, startTick) for obj in lst: print('Detected %s - Confidence %f%%' % (labels[obj.index()], obj.value())) img.draw_rectangle(obj.rect(), color=(255, 255, 255)) img.draw_string(4, 4, labels[obj.index()]) print('algo time cost : %.2f ms' % (tAvg))
# Minimal capture script: configure the sensor, grab one frame, save as PPM.
import sensor

# NOTE(review): no sensor.reset() before configuring — relies on whatever
# state the sensor is already in; confirm this is intentional.
sensor.set_contrast(1)
sensor.set_gainceiling(8)
sensor.set_framesize(sensor.QQVGA)
sensor.set_pixformat(sensor.RGB565)

# NOTE(review): the variable name shadows the OpenMV `image` module.
image = sensor.snapshot()
image.save("/test.ppm")
#求x的平方的和 def Square_Sum(data,datalen): i = 0 z = 0 for i in range(datalen): z = z + data[i]*data[i] return z #设置摄像头相关参数 sensor.reset() sensor.set_pixformat(sensor.RGB565) #设置图片格式(彩色/灰度图) sensor.set_framesize(sensor.QVGA) #设置图像大小 sensor.skip_frames(10) #等待设置生效 sensor.set_auto_whitebal(False) #关闭白平衡 sensor.set_auto_gain(False) #关闭自动亮度调节 sensor.set_contrast(0) #对比度 -3~3 7个调节 sensor.set_brightness(0) #亮度 sensor.set_saturation(0) #饱和度 uart = UART(3,115200) #设置串口 led = pyb.LED(3) #提示灯 clock = time.clock() #初始化时钟 #颜色识别区域的中心坐标 x_distance = y_distance = 0 #路径识别参数 ROIS = [ # [ROI, weight] #(0, 110, 160, 10), #(0, 100, 160, 10), (0, 090, 160, 10),
# Importe de librerias import sensor, image, lcd, time, utime import KPU as kpu # Configuración inicial de la pantalla LCD y la camara OV2640 lcd.init() # Inicializa la pantalla sensor.reset() # Inicializa la camara sensor.set_pixformat(sensor.RGB565) # Define el formato de color de la imagen sensor.set_framesize( sensor.QVGA) # Establece la captura de imagen como QVGA (320x240) sensor.set_windowing( (224, 224)) # Establece el tamaño de imagen con el que se entreno la red sensor.set_vflip(1) # Rotación vertical de la imagen sensor.set_saturation(-3) # Saturacion sensor.set_brightness(-3) # brightness sensor.set_contrast(-3) # contrast lcd.clear() # Limpia la pantalla y la deja en negro # Descripción y carga del modelo labels = ['Acaro', 'Bueno', 'Manchado'] # Etiquetas de la ultima capa de la red task = kpu.load( '/sd/3clases.kmodel') # Acá va al ubicación del archivo .kmodel (CARGA) kpu.set_outputs(task, 0, 1, 1, 3) # Aqúi van las dimensiones de la ultima capa de la red while (True): tick1 = utime.ticks_ms() # Ejecucion del modelo en tiempo real
from pyb import UART import math # 初始化摄像头 clock = time.clock() sensor.reset() # 初始化光感元件 if sensor.get_id() == sensor.OV7725: sensor.set_hmirror(True) # 水平方向翻转 sensor.set_vflip(True) # 垂直方向翻转 sensor.set_pixformat(sensor.RGB565) # 设置为rgb sensor.set_framesize(sensor.QVGA) # 设置图像大小 sensor.skip_frames(20) # 跳过前20帧 sensor.set_auto_exposure(1) sensor.set_auto_whitebal(False) # 关闭自动白平衡 sensor.set_auto_gain(False) # 关闭自动增益 sensor.set_contrast(+3) clock = time.clock() # 跟踪FPS帧率 uart = UART(3, 19200) #初始化串口三 def find_ball(): #引入和声明数据 global fps global img target_ball = 0 target_ball_size = 0 #先查找色块 img = sensor.snapshot().lens_corr(strength=1.1, zoom=1.0) blobs = img.find_blobs([ball_threshold], roi=area, pixels_threshold=20)
# Color Tracking Thresholds (L Min, L Max, A Min, A Max, B Min, B Max) # The below thresholds track in general red/green things. You may wish to tune them... # generic_red_thresholds -> index is 0 so code == (1 << 0) # generic_green_thresholds -> index is 1 so code == (1 << 1) # Codes are or'ed together when "merge=True" for "find_blobs". sensor.reset() sensor.set_pixformat(sensor.RGB565) sensor.set_framesize(sensor.QVGA) sensor.skip_frames(time=2000) sensor.set_auto_exposure(False, 5000) sensor.set_auto_gain(False, 0, 0) # must be turned off for color tracking sensor.set_auto_whitebal(False, (0, 0, 0)) # must be turned off for color tracking sensor.set_saturation(+3) sensor.set_contrast(-3) clock = time.clock() # _ # | \ # | |\ # | | \ # Goal/ _____| _| \ # Hole | | \ # | | \ # | | \ # | | \ # | | \ # | | \ # Goal _ | | \ <---- upward_distance # Hight | | \
#get the gains and exposure gain_db = sensor.get_gain_db() exposure_us = sensor.get_exposure_us() rgb_gain_db = sensor.get_rgb_gain_db() print("gain_db is " + str(gain_db)) print("exposure is " + str(exposure_us)) print("rgb_gain_db is " + str(rgb_gain_db)) # Set the gain and exposure as fixed (not concerned about the values) sensor.set_auto_gain(False, gain_db) sensor.set_auto_exposure(False, exposure_us) sensor.set_auto_whitebal(False, rgb_gain_db) # Setup contrast, brightness and saturation sensor.set_contrast(0) # range -3 to +3 sensor.set_brightness(0) # range -3 to +3 sensor.set_saturation(0) # range -3 to +3 # Disable night mode (auto frame rate) and black level calibration (BLC) sensor.__write_reg(0x0E, 0b00000000) # Disable night mode sensor.__write_reg(0x3E, 0b00000000) # Disable BLC sensor.__write_reg(0x13, 0b00000000) # disable automated gain gain_db = sensor.get_gain_db() exposure_us = sensor.get_exposure_us() rgb_gain_db = sensor.get_rgb_gain_db() print("gain_db is " + str(gain_db)) print("exposure is " + str(exposure_us)) print("rgb_gain_db is " + str(rgb_gain_db))
# Iris Detection 2 Example # # This example shows how to find the eye gaze (pupil detection) after finding # the eyes in an image. This script uses the find_eyes function which determines # the center point of roi that should contain a pupil. It does this by basically # finding the center of the darkest area in the eye roi which is the pupil center. # # Note: This script does not detect a face first, use it with the telephoto lens. import sensor, time, image # Reset sensor sensor.reset() # Sensor settings sensor.set_contrast(3) sensor.set_gainceiling(16) # Set resolution to VGA. sensor.set_framesize(sensor.VGA) # Bin/Crop image to 200x100, which gives more details with less data to process sensor.set_windowing((220, 190, 200, 100)) sensor.set_pixformat(sensor.GRAYSCALE) # Load Haar Cascade # By default this will use all stages, lower stages is faster but less accurate. eyes_cascade = image.HaarCascade("eye", stages=24) print(eyes_cascade)
# 用于匿名的openmv,串口通信程序 import sensor, image, time, math, struct from pyb import UART, LED, Timer #初始化镜头 sensor.reset() #初始化摄像头,reset()是sensor模块里面的函数 sensor.set_pixformat(sensor.GRAYSCALE) #设置图像色彩格式,有RGB565色彩图和GRAYSCALE灰度图两种 sensor.set_framesize(sensor.QQVGA) #图像质量 分辨率为120*160 sensor.skip_frames(30) sensor.set_auto_gain(True) #自动增益 sensor.set_auto_whitebal(True) #打开白平衡 sensor.set_contrast(3) #对比度 blue_led = LED(3) clock = time.clock() #初始化时钟 uart = UART(3, 500000) #初始化串口 波特率 500000 thresholds = [0, 100] #自定义灰度阈值 fthresholds = [100, 256] #自定义灰度阈值 class Dot(object): x = 0 y = 0 pixels = 0 num = 0 ok = 0 flag = 0 class singleline_check():
BG_UPDATE_FRAMES = 15 #50 BG_UPDATE_BLEND = 0 #128 # fire up servo board i2c = I2C(sda=Pin('P5'), scl=Pin('P4')) servo = Servos(i2c, address=0x40, freq=50, min_us=650, max_us=2800, degrees=180) sensor.reset() # Sensor settings sensor.set_contrast(3) #1 sensor.set_gainceiling(16) sensor.set_auto_whitebal(False) # HQVGA and GRAYSCALE are the best for face tracking. sensor.set_framesize(sensor.HQVGA) sensor.set_pixformat(sensor.GRAYSCALE) sensor.skip_frames(time=2000) # Load Haar Cascade # By default this will use all stages, lower satges (25)is faster but less accurate. face_cascade = image.HaarCascade("frontalface", stages=20) print(face_cascade) # BG subtraction setup extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.GRAYSCALE)
#************************************ (C) COPYRIGHT 2019 ANO ***********************************# import sensor, image, time, math, struct, lcd import json from pyb import LED, Timer from struct import pack, unpack #初始化镜头 sensor.reset() sensor.set_pixformat(sensor.RGB565) #设置相机模块的像素模式 sensor.set_framesize(sensor.QVGA) #设置相机分辨率160*120 sensor.skip_frames(time=3000) #时钟 sensor.set_auto_whitebal(False) #若想追踪颜色则关闭白平衡 clock = time.clock() #初始化时钟 sensor.set_contrast(1) #设置相机图像对比度。-3至+3 sensor.set_gainceiling(16) #设置相机图像增益上限。2, 4, 8, 16, 32, 64, 128。 #lcd.init() #主循环 class Recognition(object): flag = 0 color = 0 cx = 0 cy = 0 Recognition = Recognition() # 红色阈值 red_threshold = (40, 91, 34, 127, -60, 96) # 绿色阈值 green_threshold = (42, 100, -84, -26, -2, 108)
import sensor, image, time, math #引入摄像头、图像、时间模块 from pid import PID #引入PID模块 from pyb import UART #引入UART串口通信模块 sensor.reset() #初始化摄像头 sensor.set_pixformat(sensor.RGB565) #设置RGB图像格式 sensor.set_framesize(sensor.QQVGA) #设置图像分辨率 sensor.skip_frames(10, 1000) #跳过10帧 sensor.set_contrast(1) #设置对比度 sensor.set_auto_whitebal(False) #关闭白平衡 #sensor.set_auto_gain(False) #关闭自动增益 time.sleep(2000) #等待2S white_thresholds = (62, 98, 1, 9, -15, 10) #设置白色阈值 red_thresholds = (41, 83, 24, 83, -18, 60) #设置红色阈值 green_thresholds = (47, 93, -69, -24, 26, 83) #设置绿色阈值 threshold_change = red_thresholds #颜色阈值变量 size_threshold1 = 2500 #设置色块大小 size_threshold2 = 4000 #设置色块大小 x_pid = PID(p=4.9, i=0.5, imax=100) #初始化P、I参数 h_pid = PID(p=0.12, i=0.019, imax=50) #初始化P、I参数 uart = UART(3, 115200) #设置通信串口P4、P5 K = 500 #计算距离系数 logo = [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ] #判断距离大小是否趋于稳定的标识符
import sensor, time

sensor.reset()
# Set sensor settings
sensor.set_brightness(0)
sensor.set_saturation(0)
sensor.set_gainceiling(8)
sensor.set_contrast(2)
# Set sensor pixel format
sensor.set_framesize(sensor.QVGA)
sensor.set_pixformat(sensor.RGB565)
# Enable colorbar test mode
sensor.set_colorbar(True)
# Skip a few frames to allow the sensor settle down
# NOTE(review): the loop variable shadows the OpenMV `image` module; code
# beyond this chunk appears to keep using it, so it is left unchanged here.
for i in range(0, 30):
    image = sensor.snapshot()
# Color bars thresholds: one classifier per bar, left to right
t = [lambda r, g, b: r < 50 and g < 50 and b < 50,  # Black
     lambda r, g, b: r < 50 and g < 50 and b > 200,  # Blue
     lambda r, g, b: r > 200 and g < 50 and b < 50,  # Red
     lambda r, g, b: r > 200 and g < 50 and b > 200,  # Purple
     lambda r, g, b: r < 50 and g > 200 and b < 50,  # Green
     lambda r, g, b: r < 50 and g > 200 and b > 200,  # Aqua
     lambda r, g, b: r > 200 and g > 200 and b < 50,  # Yellow
     lambda r, g, b: r > 200 and g > 200 and b > 200]  # White
# 320x240 image with 8 color bars each one is approx 40 pixels.
# Set the maximum Frames Per Second for the main loop (fps) MAX_FPS = 2.0 # Reset and initialize the sensor. sensor.reset() # Set pixel format to RGB565 (or GRAYSCALE) sensor.set_pixformat(sensor.RGB565) # Set frame size to 64x64 pixels sensor.set_framesize(sensor.B64X64) # Disable automatic exposure and gain sensor.set_auto_exposure(False) sensor.set_auto_gain(False) sensor.set_contrast(0) sensor.set_brightness(0) # Wait two seconds for settings take effect. sensor.skip_frames(time=2000) # Create a clock object to track the FPS. clock = time.clock() # Load the Tensorflow Lite model model = tf.load('whattolabel_resnet9.tflite') def setLED(color='none'): """Function to set LED to a color (red, green, blue, none) """
# This threshold is used when extracting keypoints, the lower # the threshold the higher the number of keypoints extracted. KEYPOINTS_THRESH=30 # Keypoint-level threshold, range from 0 to 100. # This threshold is used when matching two keypoint descriptors, it's the # percentage of the distance between two descriptors to the max distance. # In other words, the minimum matching percentage between 2 keypoints. MATCHING_THRESH=70 #greenled = pyb.LED(1) #blueled = pyb.LED(2) # Reset sensor sensor.reset() # Sensor settings sensor.set_contrast(1) sensor.set_gainceiling(16) sensor.set_framesize(sensor.HQVGA) sensor.set_pixformat(sensor.GRAYSCALE) # Skip a few frames to allow the sensor settle down # Note: This takes more time when exec from the IDE. #for i in range(0, 10): # img = sensor.snapshot() # img.draw_string(0, 0, "Please wait...") # Load Haar Cascade # By default this will use all stages, lower satges is faster but less accurate. face_cascade = image.HaarCascade("frontalface", stages=25) print(face_cascade)
user_exposure_time = 500 #camera exposure time, feel free to change. 900 is default value for tested setup sensor.reset() # Initialize the camera sensor sensor.set_pixformat( sensor.GRAYSCALE ) # set camera format to grayscale (color not important in this example) sensor.set_framesize(sensor.QVGA) # set camera resolution to QVGA 320 x 240 sensor.set_auto_whitebal(False) # Turn off white balance sensor.set_auto_exposure( False, exposure_us=user_exposure_time ) # set exposure time, user changable (user_exposure_time variable) #these setting are manually set in order to prevent camera to change it during use (no automatic setting in machine vision!) sensor.set_brightness(0) #set camera brightness sensor.set_contrast(2) #set camera contrast sensor.set_saturation(0) #set camera saturation # Print out the initial gain for comparison. print("Initial gain == %f db" % sensor.get_gain_db()) sensor.skip_frames( time=2000) #small 2 seconds pause, so camera can update its settings # calculating sensor gain # user can change GAIN_SCALE factor in order to obtain more bright image current_gain_in_decibels = sensor.get_gain_db() #current sensor gain print("Current Gain == %f db" % current_gain_in_decibels) #reporting current gain sensor.set_auto_gain(False, gain_db=current_gain_in_decibels * GAIN_SCALE) #new gain print("New gain == %f db" % sensor.get_gain_db()) #reporting new sensor gain