def BlobTest(thresholds, loopCnt=390, barLen=120):
    """Timed color-blob detection demo.

    Args:
        thresholds: list of LAB threshold tuples passed to ``find_blobs()``.
        loopCnt: total demo duration in ticks (ms).
        barLen: pixel length of the progress bar drawn by ``DrawPgsBar``.
    """
    sensor.reset()
    sensor.set_framerate(1 << 11)
    sensor.set_pixformat(sensor.RGB565)
    sensor.set_framesize(sensor.CIF)
    #sensor.set_windowing((320,240))
    sensor.set_auto_gain(True)
    #sensor.set_auto_whitebal(True)  # must be turned off for color tracking
    clock = time.clock()
    avg = 0.0
    startTick = time.ticks()
    while True:
        if time.ticks() - startTick > loopCnt:
            break
        clock.tick()
        img = sensor.snapshot()
        t0 = time.ticks()
        blobSet = img.find_blobs(thresholds, pixels_threshold=200,
                                 area_threshold=200)
        t1 = time.ticks() - t0
        # Exponential moving average of the per-frame detection time (ms).
        avg = avg * 0.95 + t1 * 0.05
        # FIX: removed dead local 'lnLen' — it was computed every frame but
        # never used; DrawPgsBar derives the bar length itself.
        DrawPgsBar(img, barLen, loopCnt, startTick, 'red,green,blue blob detect')
        for blob in blobSet:
            img.draw_rectangle(blob.rect())
            img.draw_cross(blob.cx(), blob.cy())
    print('algo time cost : %.2f ms' % (avg))
def CIFAR10Test(loopCnt=600, isFull=False, barLen=105):
    """CIFAR-10 CNN classification demo.

    Args:
        loopCnt: total demo duration in ticks (ms).
        isFull: load the full network ('/cifar10.network') when True,
            otherwise the faster, smaller one.
        barLen: pixel length of the progress bar drawn by ``DrawPgsBar``.
    """
    pyb.LED(1).off()
    sensor.reset()  # Reset and initialize the sensor.
    sensor.set_contrast(3)
    sensor.set_pixformat(sensor.RGB565)
    sensor.set_framesize(sensor.VGA)
    sensor.set_windowing((192, 192))  # Crop to the net's input window.
    sensor.skip_frames(time=100)  # Wait for settings to take effect.
    sensor.set_auto_gain(True)
    sensor.set_framerate(0 << 9 | 1 << 12)
    if isFull:
        net = nn.load('/cifar10.network')
    else:
        net = nn.load('/cifar10_fast.network')
    labels = ['plane', 'auto', 'bird', 'cat', 'deer',
              'dog', 'frog', 'horse', 'ship', 'truck']
    clock = time.clock()
    tAvg = 0.0
    startTick = time.ticks()
    while True:
        if time.ticks() - startTick > loopCnt:
            break
        clock.tick()
        img = sensor.snapshot()
        t0 = time.ticks()
        lst = net.search(img, threshold=0.640, min_scale=1, scale_mul=0.8, \
            x_overlap=-1, y_overlap=-1, contrast_threshold=0.5)
        t1 = time.ticks() - t0
        # Exponential moving average of the per-frame search time (ms).
        tAvg = tAvg * 0.9 + t1 * 0.1
        # BUGFIX: the overlay previously omitted 'bird' and listed 'horse'
        # twice; it now matches the labels list above.
        img.draw_string(
            4, 8,
            'CIFAR-10: classify:\nplane,auto,bird,cat,\ndeer,dog,frog,horse,\nship,truck',
            color=(0, 0, 0))
        # FIX: removed dead local 'lnLen' — computed every frame, never used.
        DrawPgsBar(img, barLen, loopCnt, startTick)
        for obj in lst:
            print(' %s - Confidence %f%%' % (labels[obj.index()], obj.value()))
            rc = obj.rect()
            #img.draw_rectangle(rc, color=(255,255,255))
            # Black backing box so the label text is readable.
            img.draw_rectangle(barLen + 10, 1, 50, 8, fill=True, color=(0, 0, 0))
            img.draw_string(barLen + 10, 0, labels[obj.index()])
    print('algo time cost : %.2f ms' % (tAvg))
def QRCodeTest(loopCnt=120, barLen=120):
    """QR-code scanning demo: finds codes and overlays their payloads.

    Args:
        loopCnt: total demo duration in ticks (ms).
        barLen: pixel length of the progress bar drawn by ``DrawPgsBar``.
    """
    sensor.reset()
    sensor.set_framerate(1 << 11)
    sensor.set_pixformat(sensor.RGB565)
    sensor.set_framesize(sensor.VGA)
    sensor.set_windowing((400, 272))
    sensor.skip_frames(time=300)
    sensor.set_auto_gain(False)
    clock = time.clock()
    avg = 0.0
    startTick = time.ticks()
    while True:
        if time.ticks() - startTick > loopCnt:
            break
        clock.tick()
        img = sensor.snapshot()
        #img.lens_corr(1.5)  # strength of 1.8 is good for the 2.8mm lens.
        t1 = time.ticks()
        codeSet = img.find_qrcodes()
        t2 = time.ticks() - t1
        # FIX: removed dead first assignment to 'lnLen' — it was always
        # recomputed from the payload length inside the loop below.
        DrawPgsBar(img, barLen, loopCnt, startTick, 'QR code scan')
        # Exponential moving average of the per-frame scan time (ms).
        avg = avg * 0.92 + t2 * 0.08
        for code in codeSet:
            rc = code.rect()
            img.draw_rectangle(rc, thickness=2, color=(0, 191, 255))
            sPayload = code.payload()
            # Approximate rendered text width (8 px per char); shift left so
            # the label stays inside the 400-px-wide window.
            lnLen = len(sPayload) * 8
            if rc[0] + lnLen >= 400:
                x = 400 - lnLen
            else:
                x = rc[0]
            img.draw_rectangle(x - 1, rc[1] + 1, lnLen + 2, 8,
                               color=(0, 0, 0), fill=True)
            img.draw_string(x, rc[1], sPayload)
    print('algo time cost : %.2f ms' % (avg))
def LENETTest(loopCnt=1200, barLen=60):
    """Handwritten-digit (LeNet) recognition demo over an 84x84 grayscale window.

    Args:
        loopCnt: total demo duration in ticks (ms).
        barLen: pixel length of the inline progress bar.
    """
    sensor.reset()
    sensor.set_contrast(3)
    sensor.set_pixformat(sensor.GRAYSCALE)  # LeNet expects grayscale input.
    sensor.set_framesize(sensor.VGA)
    sensor.set_windowing((84, 84))  # Center crop fed to the network.
    sensor.skip_frames(time=1400)  # Wait for settings to take effect.
    sensor.set_auto_gain(False)
    sensor.set_framerate(2 << 2)
    #sensor.set_auto_whitebal(False)
    #sensor.set_auto_exposure(False)
    net = nn.load('/lenet.network')
    labels = [str(digit) for digit in range(10)]
    fpsClock = time.clock()
    costAvg = 0.0
    pyb.LED(1).on()
    tBegin = time.ticks()
    while time.ticks() - tBegin <= loopCnt:
        fpsClock.tick()
        img = sensor.snapshot()
        img.draw_string(3, 8, 'recg 0-9', color=(0, 0, 0))
        tickStart = time.ticks()
        # Binarize a copy (dark strokes -> white) before searching.
        binImg = img.copy().binary([(120, 255)], invert=True)
        found = net.search(binImg, threshold=0.8, min_scale=1, scale_mul=0.8, \
            x_overlap=-1, y_overlap=-1, contrast_threshold=0.5, softmax=False)
        elapsed = time.ticks() - tickStart
        # Exponential moving average of the per-frame search time (ms).
        costAvg = costAvg * 0.95 + elapsed * 0.05
        # Shrinking progress bar: remaining time mapped onto barLen pixels.
        remain = (barLen * (loopCnt - (time.ticks() - tBegin))) // loopCnt
        img.draw_rectangle(0, 2, barLen + 1, 3)
        img.draw_rectangle(0, 3, remain, 1, fill=True)
        for obj in found:
            print('Detected %s - Confidence %f%%' %
                  (labels[obj.index()], obj.value()))
            img.draw_rectangle(obj.rect())
            img.draw_string(barLen + 8, 2, labels[obj.index()], color=(0, 0, 0))
        # print(fpsClock.fps())
    print('algo time cost : %.2f ms' % (costAvg))
import sensor, image, time, pyb
import my_ips, my_file, my_uart, my_key

# --- Camera bring-up: QQVGA RGB565, then lock gain/white-balance ---
sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framerate(2<<11)
sensor.set_framesize(sensor.QQVGA)#160x120
my_ips.init()
sensor.skip_frames(time = 500)  # let auto-exposure/gain settle first
sensor.set_auto_gain(False)  # lock gain — required for stable color thresholds
sensor.set_auto_whitebal(False)  # lock white balance for the same reason
sensor.skip_frames(time = 100)
clock = time.clock()

# Turn all three LEDs on (white) as a "running" indicator.
ledR = pyb.LED(1)
ledG = pyb.LED(2)
ledB = pyb.LED(3)
ledB.on()
ledR.on()
ledG.on()

colorxy = 0  # presumably a tracking state/position value — TODO confirm against rest of file
img_cnt = 0  # frame counter

# LAB color thresholds: (L_min, L_max, A_min, A_max, B_min, B_max).
# red_threshold holds one entry per state indexed by red_ch; the first three
# entries are identical — NOTE(review): possibly placeholders, confirm intent.
red_threshold = [[12, 80, 16, 73, -1, 56],[12, 80, 16, 73, -1, 56],[12, 80, 16, 73, -1, 56],[0, 70, 19, 90, -11, 35]]
blue_threshold = [0, 50, -128, 127, -128, -5]
black_threshold = [0, 15, -128, 127, -128, 127]
white_threshold = [40, 100, -128, 127, -128, 127]
red_ch = 0  # index into red_threshold — TODO confirm usage elsewhere in file

# Narrow vertical ROI strips (x, y, w, h) on the 160x120 frame — presumably
# for line/edge sampling; verify against the consuming code.
roi_white = [(109,0,2,120),(113,0,2,120),(117,0,2,120),(121,0,2,120)]
roi_white2 = [73,0,2,120]
# Himax motion detection example. import sensor, image, time, pyb from pyb import Pin, ExtInt sensor.reset() sensor.set_pixformat(sensor.GRAYSCALE) sensor.set_framesize(sensor.QVGA) sensor.skip_frames(time=2000) # The sensor is less noisy with lower FPS. sensor.set_framerate(15) # Configure and enable motion detection sensor.ioctl(sensor.IOCTL_HIMAX_MD_THRESHOLD, 0x01) sensor.ioctl(sensor.IOCTL_HIMAX_MD_WINDOW, (0, 0, 320, 240)) sensor.ioctl(sensor.IOCTL_HIMAX_MD_CLEAR) sensor.ioctl(sensor.IOCTL_HIMAX_MD_ENABLE, True) motion_detected = False def on_motion(line): global motion_detected motion_detected = True led = pyb.LED(3) # Configure external interrupt pin. When motion is detected, this pin is pulled high ext = ExtInt(Pin("PC15"), ExtInt.IRQ_RISING, Pin.PULL_DOWN, on_motion)
# Reset sensor sensor.reset() # Sensor settings sensor.set_auto_gain(True) sensor.set_auto_exposure(True) sensor.set_auto_whitebal(True) sensor.set_brightness(2) sensor.set_contrast(1) sensor.set_gainceiling(16) # HQVGA and GRAYSCALE are the best for face tracking. sensor.set_framesize(sensor.CIF) sensor.set_windowing((256, 192)) sensor.set_pixformat(sensor.RGB565) sensor.set_framerate(0 << 9 | 1 << 11) sensor.skip_frames(time=700) # Wait for settings take effect. sensor.set_auto_gain(False) # FPS clock clock = time.clock() nndemo.show(0, index=0, data=0) while (True): clock.tick() nndemo.show(0, index=0, data=0) #sensor.snapshot() # Print FPS. # Note: Actual FPS is higher, streaming the FB makes it slower. print(clock.fps())