# ---- Esempio n. 1 (Example 1) ----
# 0
def face_detect():
    """Configure the sensor for face detection and scan up to 60 frames.

    Returns True once `filter_stages` face centroids have been accumulated
    in the tracking arrays, False if no face satisfied that within 60 frames.
    Relies on module-level globals: sensor, time, LED, face_cascade,
    find_max, cx_array, cy_array, h_array, filter_stages.
    """
    print("Detecting face...")
    sensor.reset()
    sensor.set_contrast(1)
    sensor.set_gainceiling(16)
    # Small grayscale frames keep the Haar-cascade search fast.
    sensor.set_framesize(sensor.HQVGA)
    sensor.set_pixformat(sensor.GRAYSCALE)

    clock = time.clock()  # NOTE(review): created but never ticked — FPS is not measured here

    LED.on()
    for i in range(60):
        img = sensor.snapshot()
        objects = img.find_features(face_cascade, threshold=0.75, scale_factor=1.25)

        # Draw objects
        if objects:
            for r in objects:
                img.draw_rectangle(r)
                cx=r[0]+r[2]//2
                cy=r[1]+r[3]//2
                img.draw_cross(cx,cy)
            # Track only the largest detection; push its centre/size into the
            # arrays consumed by the tracking stage.
            obj = find_max(objects)
            cx_array.append(obj[0]+obj[2]//2)
            cy_array.append(obj[1]+obj[3]//2)
            h_array.append(obj[2])
            h_threshold = obj[2]  # NOTE(review): binds a local, not a global — confirm intent
            if len(cx_array) == filter_stages:
                LED.off()
                print('Face detected.\nStart tracking...')
                return True
    LED.off()
    print('No face detected.\nTrying color mode...')
    return False
# ---- Esempio n. 2 (Example 2) ----
# 0
def run(argv):
    """Configure the camera for the requested mode and dispatch to it.

    argv == 0  -> grayscale QQVGA arrow-detection setup, then findArrow().
    otherwise  -> RGB565 VGA (240x240 window) signal setup, then checkSignal().

    Relies on module-level globals: sensor, findArrow, checkSignal.
    """
    mode = argv
    if (mode == 0):
        # Arrow-detection mode: small grayscale frames, flipped and mirrored.
        # (A dead triple-quoted "comment" string with an alternative setup
        # was removed here — it was a no-op expression statement.)
        sensor.reset()
        sensor.set_pixformat(sensor.GRAYSCALE)
        sensor.set_framesize(sensor.QQVGA)
        sensor.set_vflip(True)
        sensor.set_hmirror(True)
        sensor.skip_frames(time=2000)  # let the new settings settle

        findArrow()
    else:
        # Signal-detection mode: low contrast/brightness RGB with a
        # centered 240x240 window cropped from the VGA frame.
        sensor.reset()
        sensor.set_auto_gain(False)
        sensor.set_auto_whitebal(True)
        sensor.set_contrast(-3)
        sensor.set_brightness(-3)
        sensor.set_gainceiling(8)
        sensor.set_pixformat(sensor.RGB565)
        sensor.set_vflip(True)
        sensor.set_framesize(sensor.VGA)
        sensor.set_windowing((240, 240))  # 240x240 center pixels of VGA
        sensor.skip_frames(time=800)
        checkSignal()
def FaceTest():
    """Run Haar-cascade face detection for 250 frames, drawing and
    labelling every detected face and printing the FPS each frame.

    Relies on module-level globals: sensor, image, time.
    """
    sensor.reset()

    # Sensor settings
    sensor.set_contrast(1)
    sensor.set_gainceiling(16)
    # HQVGA and GRAYSCALE are the best for face tracking.

    sensor.set_framesize(sensor.VGA)
    sensor.set_windowing((320, 240))
    #sensor.set_framesize(sensor.QVGA)

    sensor.set_pixformat(sensor.GRAYSCALE)
    #sensor.set_framerate(2<<9|3<<11)
    # Load Haar Cascade
    # By default this will use all stages, lower stages is faster but less accurate.
    face_cascade = image.HaarCascade("frontalface", stages=25)
    print(face_cascade)
    clock = time.clock()
    for i in range(250):
        clock.tick()
        img = sensor.snapshot()
        objects = img.find_features(face_cascade,
                                    threshold=0.75,
                                    scale_factor=1.25)
        fID = 0  # running face index used to label each rectangle
        for r in objects:
            img.draw_rectangle(r, color=(0, 0, 0), thickness=3)
            #img.draw_rectangle(r[0], r[1], 48, 10, fill=True, color=(0,0,0))
            fID += 1
            s = 'face %d' % (fID)
            img.draw_string(r[0], r[1], s)
        print(clock.fps())
# ---- Esempio n. 4 (Example 4) ----
# 0
def init(is_debug, pixformat, delay_time):
    """Initialize the camera and both UARTs.

    is_debug: when True, CompetitionScene is set to 0 (debug), else 1.
    pixformat: "GRAY" selects grayscale; anything else selects RGB565
        (the previous default behavior — the parameter used to be ignored).
    delay_time: milliseconds of frames to skip so new settings settle.

    Relies on module-level globals: uart, uart2, sensor.
    """
    # Shut the UARTs down so they can't overflow during initialization.
    uart.deinit()
    uart2.deinit()

    sensor.reset()

    # Honor the requested pixel format (it was previously ignored and
    # RGB565 hard-coded); matches the convention of the other init().
    if pixformat == "GRAY":
        sensor.set_pixformat(sensor.GRAYSCALE)
    else:
        sensor.set_pixformat(sensor.RGB565)

    sensor.set_framesize(sensor.QVGA)  # 320x240

    sensor.set_gainceiling(128)  # gain ceiling: 2, 4, 8, 16, 32, 64, 128
    sensor.set_contrast(3)  # contrast: -3 .. +3
    sensor.set_brightness(0)  # brightness: -3 .. +3
    sensor.set_saturation(3)  # saturation: -3 .. +3
    sensor.set_auto_exposure(True)  # auto exposure on

    sensor.skip_frames(time=delay_time)
    sensor.set_auto_gain(False)  # must be off for color tracking
    sensor.set_auto_whitebal(False)  # must be off for color tracking

    # Re-open the UARTs.
    uart.init(115200, timeout_char=1000)
    uart2.init(115200, timeout_char=1000)
    # Record whether we are running in debug mode.
    global CompetitionScene
    if is_debug == True:
        CompetitionScene = 0
    else:
        CompetitionScene = 1
# ---- Esempio n. 5 (Example 5) ----
# 0
def CorrTest(loopCnt = 220, barLen=120):
    """Lens-correction demo: draw concentric rectangles, apply an
    increasing lens_corr strength (0.3 .. 4.0, wrapping), and draw a
    progress bar until `loopCnt` ticks have elapsed.

    loopCnt: total run time, in time.ticks() units.
    barLen: progress-bar length in pixels, forwarded to DrawPgsBar.
    Relies on module-level globals: sensor, time, DrawPgsBar.
    """
    sensor.reset()

    # Sensor settings
    sensor.set_contrast(1)
    sensor.set_gainceiling(16)

    sensor.set_framesize(sensor.QVGA)
    sensor.set_pixformat(sensor.RGB565)
    #sensor.set_windowing((480,272))
    clock = time.clock()
    startTick = time.ticks()
    corr = 0.3
    while (True):
        if time.ticks() - startTick > loopCnt:
            break
        clock.tick()
        img = sensor.snapshot()
        # Concentric rectangles make the lens distortion visible.
        for i in range(7):
            img.draw_rectangle(160-i*15, 120-i*15, i*15*2, i*15*2)
        corr += 0.05
        if corr >= 4.0:
            corr = 0.3  # wrap the correction strength back to the minimum
        img.lens_corr(corr)

        # (Removed dead locals `avg` and `lnLen` — both were computed but
        # never used; DrawPgsBar derives the bar length itself.)
        DrawPgsBar(img, barLen, loopCnt, startTick)
        img.draw_string(4,4,'Lens correction %.2f' % (corr), color=(0,0,0))
# ---- Esempio n. 6 (Example 6) ----
# 0
def facetrack_camInit():
    """Configure the sensor for face tracking: a 200x200 grayscale window
    cropped from a VGA frame, contrast 3, gain ceiling 16.

    Relies on the module-level `sensor` global.
    """
    print("INIT Facetrack")
    # Reset sensor
    sensor.reset()
    sensor.set_contrast(3)
    sensor.set_gainceiling(16)
    sensor.set_framesize(sensor.VGA)
    sensor.set_windowing((200, 200))
    sensor.set_pixformat(sensor.GRAYSCALE)
    sensor.skip_frames(time = 2000)  # let the new settings settle
# ---- Esempio n. 7 (Example 7) ----
# 0
def face_recog(calc_time, vi_ip):
    """Repeatedly detect a face, save a snapshot, and hand the file to
    facial_recog() until `calc_time` ms have elapsed.

    calc_time: overall time budget in milliseconds.
    vi_ip: forwarded to facial_recog() (presumably a server address —
        confirm against facial_recog's definition).
    Relies on module-level globals: sensor, image, pyb, uos, gc, facial_recog.
    """
    pin = pyb.millis()  # start-of-function timestamp for the time budget
    print(pin)
    print(calc_time)
    cc = 0  # NOTE(review): never used
    #pyb.elapsed_millis(start)
    while pyb.elapsed_millis(pin) < calc_time:
        print("top of face recog function")
        #snapshot on face detection
        RED_LED_PIN = 1
        BLUE_LED_PIN = 3
        sensor.reset()  # Initialize the camera sensor.
        sensor.set_contrast(3)
        sensor.set_gainceiling(16)
        sensor.set_pixformat(sensor.GRAYSCALE)
        sensor.set_framesize(sensor.HQVGA)  # or sensor.QQVGA (or others)
        #sensor.alloc_extra_fb()
        sensor.skip_frames(time=2000)  # Let new settings take affect.
        face_cascade = image.HaarCascade("frontalface", stages=25)
        uos.chdir("/")
        pyb.LED(RED_LED_PIN).on()
        print("About to start detecting faces...")
        sensor.skip_frames(time=2000)  # Give the user time to get ready.
        pyb.LED(RED_LED_PIN).off()
        print("Now detecting faces!")
        pyb.LED(BLUE_LED_PIN).on()
        diff = 10  # We'll say we detected a face after 10 frames.
        try:
            while (diff):
                img = sensor.snapshot()
                # Scratch frame buffer for find_features; freed right after.
                sensor.alloc_extra_fb(img.width(), img.height(),
                                      sensor.GRAYSCALE)
                faces = img.find_features(face_cascade,
                                          threshold=0.5,
                                          scale_factor=1.5)
                sensor.dealloc_extra_fb()
                if faces:
                    diff -= 1
                    for r in faces:
                        img.draw_rectangle(r)
                elif (pyb.elapsed_millis(pin)) > calc_time:
                    # Time budget exhausted mid-detection: bail out through
                    # the handler below.
                    raise Exception
            pyb.LED(BLUE_LED_PIN).off()
            print("Face detected! Saving image...")
            pic_name = "snapshot-person.pgm"
            sensor.snapshot().save(
                pic_name)  # Save Pic. to root of SD card -- uos.chdir("/")
            pyb.delay(100)
            facial_recog(pic_name, vi_ip)
            gc.collect()
        except Exception as go:
            print("we are in exception")
            pyb.LED(BLUE_LED_PIN).off()
            gc.collect()
# ---- Esempio n. 8 (Example 8) ----
# 0
def face_detect(init_start, calc_time):
    """Detect a face within the remaining time budget and save a snapshot.

    init_start: pyb.millis() timestamp marking the start of the budget.
    calc_time: total budget in milliseconds.
    Returns the saved snapshot filename, or None if the budget expires.
    Relies on module-level globals: sensor, image, pyb, uos, gc.
    """
    print("~~~~~~~~~~~~~~~~FACE_DETECT~~~~~~~~~~~~~~~~~~~~~~")
    gc.collect()  #garbage collection
    while pyb.elapsed_millis(init_start) < calc_time:  #while time not expired
        #snapshot on face detection
        RED_LED_PIN = 1
        BLUE_LED_PIN = 3
        sensor.reset()  # Initialize the camera sensor.
        sensor.set_contrast(3)  #set to highest contrast setting
        sensor.set_gainceiling(16)
        sensor.set_pixformat(
            sensor.GRAYSCALE)  #grayscale for facial recognition
        sensor.set_framesize(sensor.HQVGA)
        sensor.skip_frames(time=2000)  # Let new settings take affect.
        face_cascade = image.HaarCascade(
            "frontalface",
            stages=25)  #Using Frontal Face Haar Cascade Classifier
        uos.chdir("/")
        pyb.LED(RED_LED_PIN).on()
        print("About to start detecting faces...")
        sensor.skip_frames(time=2000)  # Give the user time to get ready.
        pyb.LED(RED_LED_PIN).off()
        print("Now detecting faces!")
        pyb.LED(BLUE_LED_PIN).on()
        diff = 10  # We'll say we detected a face after 10 frames.
        try:
            while (diff):
                img = sensor.snapshot()
                sensor.alloc_extra_fb(
                    img.width(), img.height(),
                    sensor.GRAYSCALE)  #allocate more space for image
                faces = img.find_features(
                    face_cascade, threshold=0.5,
                    scale_factor=1.5)  #detecting face features
                sensor.dealloc_extra_fb()
                if faces:
                    diff -= 1
                    for r in faces:
                        img.draw_rectangle(r)
                elif (pyb.elapsed_millis(init_start)
                      ) > calc_time:  #if time is expired, leave function
                    raise Exception
            pyb.LED(BLUE_LED_PIN).off()
            print("Face detected! Saving image...")
            pic_name = "snapshot-person.pgm"
            sensor.snapshot().save(pic_name)  # Save Pic. to root of SD card
            pyb.delay(100)
            gc.collect()  #garbage collection
            return pic_name
        except Exception as go:
            print("exception - time expired")
            pyb.LED(BLUE_LED_PIN).off()
            gc.collect()  #garbage collection
# ---- Esempio n. 9 (Example 9) ----
# 0
def test_color_bars():
    """Self-test: enable the sensor's color-bar pattern and verify each of
    the 8 bars averages to its expected RGB color; raises on mismatch.

    Relies on the module-level `sensor` global.
    """
    sensor.reset()
    # Set sensor settings
    sensor.set_brightness(0)
    sensor.set_saturation(3)
    sensor.set_gainceiling(8)
    sensor.set_contrast(2)

    # Set sensor pixel format
    sensor.set_framesize(sensor.QVGA)
    sensor.set_pixformat(sensor.RGB565)

    # Enable colorbar test mode
    sensor.set_colorbar(True)

    # Skip a few camera frames to allow the sensor to settle down.
    # NOTE(review): the local name `image` shadows the OpenMV `image`
    # module inside this function (harmless here — the module isn't used).
    for i in range(0, 100):
        image = sensor.snapshot()

    #color bars thresholds — one predicate per bar, left to right
    t = [
        lambda r, g, b: r < 70 and g < 70 and b < 70,  # Black
        lambda r, g, b: r < 70 and g < 70 and b > 200,  # Blue
        lambda r, g, b: r > 200 and g < 70 and b < 70,  # Red
        lambda r, g, b: r > 200 and g < 70 and b > 200,  # Purple
        lambda r, g, b: r < 70 and g > 200 and b < 70,  # Green
        lambda r, g, b: r < 70 and g > 200 and b > 200,  # Aqua
        lambda r, g, b: r > 200 and g > 200 and b < 70,  # Yellow
        lambda r, g, b: r > 200 and g > 200 and b > 200
    ]  # White

    # color bars are inverted for OV7725
    if (sensor.get_id() == sensor.OV7725):
        t = t[::-1]

    #320x240 image with 8 color bars each one is approx 40 pixels.
    #we start from the center of the frame buffer, and average the
    #values of 10 sample pixels from the center of each color bar.
    for i in range(0, 8):
        avg = (0, 0, 0)
        idx = 40 * i + 20  #center of colorbars
        for off in range(0, 10):  #avg 10 pixels
            rgb = image.get_pixel(idx + off, 120)
            avg = tuple(map(sum, zip(avg, rgb)))

        if not t[i](avg[0] / 10, avg[1] / 10, avg[2] / 10):
            raise Exception('COLOR BARS TEST FAILED.'
                            'BAR#(%d): RGB(%d,%d,%d)' %
                            (i + 1, avg[0] / 10, avg[1] / 10, avg[2] / 10))

    print('COLOR BARS TEST PASSED...')
# ---- Esempio n. 10 (Example 10) ----
# 0
def face_detect(init_start, calc_time):
    """Detect a face within the remaining time budget and save a snapshot.

    Duplicate of the commented variant above, without inline comments.
    init_start: pyb.millis() timestamp marking the start of the budget.
    calc_time: total budget in milliseconds.
    Returns the saved snapshot filename, or None if the budget expires.
    Relies on module-level globals: sensor, image, pyb, uos, gc.
    """
    print("~~~~~~~~~~~~~~~~FACE_DETECT~~~~~~~~~~~~~~~~~~~~~~")
    gc.collect()
    while pyb.elapsed_millis(init_start) < calc_time:
        RED_LED_PIN = 1
        BLUE_LED_PIN = 3
        sensor.reset()
        sensor.set_contrast(3)
        sensor.set_gainceiling(16)
        sensor.set_pixformat(sensor.GRAYSCALE)
        sensor.set_framesize(sensor.HQVGA)
        sensor.skip_frames(time=2000)  # let the new settings settle
        face_cascade = image.HaarCascade("frontalface", stages=25)
        uos.chdir("/")
        pyb.LED(RED_LED_PIN).on()
        print("About to start detecting faces...")
        sensor.skip_frames(time=2000)  # give the user time to get ready
        pyb.LED(RED_LED_PIN).off()
        print("Now detecting faces!")
        pyb.LED(BLUE_LED_PIN).on()
        diff = 10  # declare a face detected after 10 frames with hits
        try:
            while (diff):
                img = sensor.snapshot()
                # Scratch frame buffer for find_features; freed right after.
                sensor.alloc_extra_fb(img.width(), img.height(),
                                      sensor.GRAYSCALE)
                faces = img.find_features(face_cascade,
                                          threshold=0.5,
                                          scale_factor=1.5)
                sensor.dealloc_extra_fb()
                if faces:
                    diff -= 1
                    for r in faces:
                        img.draw_rectangle(r)
                elif (pyb.elapsed_millis(init_start)) > calc_time:
                    # Budget exhausted: bail out via the handler below.
                    raise Exception
            pyb.LED(BLUE_LED_PIN).off()
            print("Face detected! Saving image...")
            pic_name = "snapshot-person.pgm"
            sensor.snapshot().save(pic_name)
            pyb.delay(100)
            gc.collect()
            return pic_name
        except Exception as go:
            print("exception - time expired")
            pyb.LED(BLUE_LED_PIN).off()
            gc.collect()
# ---- Esempio n. 11 (Example 11) ----
# 0
def test_color_bars():
    """Self-test: enable the sensor's color-bar pattern and verify each of
    the 8 bars averages to its expected RGB color; raises on mismatch.

    Variant of the test above with tighter thresholds (50 vs 70) and no
    OV7725 inversion handling. Relies on the module-level `sensor` global.
    """
    sensor.reset()
    # Set sensor settings
    sensor.set_brightness(0)
    sensor.set_saturation(0)
    sensor.set_gainceiling(8)
    sensor.set_contrast(2)

    # Set sensor pixel format
    sensor.set_framesize(sensor.QVGA)
    sensor.set_pixformat(sensor.RGB565)

    # Enable colorbar test mode
    sensor.set_colorbar(True)

    # Skip a few frames to allow the sensor settle down
    # Note: This takes more time when exec from the IDE.
    # NOTE(review): the local name `image` shadows the OpenMV `image` module.
    for i in range(0, 100):
        image = sensor.snapshot()

    # Color bars thresholds — one predicate per bar, left to right
    t = [lambda r, g, b: r < 50  and g < 50  and b < 50,   # Black
         lambda r, g, b: r < 50  and g < 50  and b > 200,  # Blue
         lambda r, g, b: r > 200 and g < 50  and b < 50,   # Red
         lambda r, g, b: r > 200 and g < 50  and b > 200,  # Purple
         lambda r, g, b: r < 50  and g > 200 and b < 50,   # Green
         lambda r, g, b: r < 50  and g > 200 and b > 200,  # Aqua
         lambda r, g, b: r > 200 and g > 200 and b < 50,   # Yellow
         lambda r, g, b: r > 200 and g > 200 and b > 200]  # White

    # 320x240 image with 8 color bars each one is approx 40 pixels.
    # we start from the center of the frame buffer, and average the
    # values of 10 sample pixels from the center of each color bar.
    for i in range(0, 8):
        avg = (0, 0, 0)
        idx = 40*i+20 # center of colorbars
        for off in range(0, 10): # avg 10 pixels
            rgb = image.get_pixel(idx+off, 120)
            avg = tuple(map(sum, zip(avg, rgb)))

        if not t[i](avg[0]/10, avg[1]/10, avg[2]/10):
            raise Exception("COLOR BARS TEST FAILED. "
            "BAR#(%d): RGB(%d,%d,%d)"%(i+1, avg[0]/10, avg[1]/10, avg[2]/10))

    print("COLOR BARS TEST PASSED...")
def init(is_debug, pixformat, delay_time):
    """Initialize the UART and camera.

    is_debug: when True, CompetitionScene is set to 0 (debug), else 1.
    pixformat: "GRAY" -> grayscale, "RGB" -> RGB565; any other value leaves
        the pixel format at whatever sensor.reset() produced.
    delay_time: milliseconds of frames to skip so the new settings settle.
    Relies on module-level globals: uart, sensor.
    """
    uart.deinit()  # close the UART so initialization can't overflow it
    sensor.reset()
    if pixformat == "GRAY":
        sensor.set_pixformat(sensor.GRAYSCALE)
    elif pixformat == "RGB":
        sensor.set_pixformat(sensor.RGB565)
    sensor.set_framesize(sensor.QQVGA)
    sensor.set_gainceiling(128)  # gain ceiling: 2..128, powers of two
    sensor.set_contrast(3)  # contrast: -3 .. +3
    sensor.set_brightness(0)
    sensor.set_saturation(3)
    sensor.set_auto_exposure(True)
    sensor.skip_frames(time=delay_time)
    sensor.set_auto_gain(False)  # must be off for color tracking
    sensor.set_auto_whitebal(False)  # must be off for color tracking
    uart.init(115200, timeout_char=1000)  # re-open the UART
    global CompetitionScene
    if is_debug == True:
        CompetitionScene = 0
    else:
        CompetitionScene = 1
 def setConfig():
     while not uart.uart.any() > 0:
         pass
     uart.checking = True
     data = uart.uart.readline()
     data = data.decode('ascii')
     data = list(map(int, data.split('_')))
     Camera.frame = RES[data[0]]
     Camera.mode = CLR[data[1]]
     Camera.filter1 = data[2]
     Camera.filter2 = data[3]
     Camera.postproc = data[4]
     Camera.contrast = data[5] / 10
     Camera.brightness = data[6] / 10
     Camera.saturation = data[7]
     sensor.set_gainceiling(2**(data[8] if data[8] > 0 and data[8] < 8 else 3))
     Camera.flash = data[9] if data[9] <= 255 and data[9] >= 0 else 0
     LED.set_led(0, (Camera.flash, Camera.flash, Camera.flash))
     LED.display()
     sensor.set_pixformat(Camera.mode)
     sensor.set_framesize(Camera.frame)
     sensor.set_saturation(Camera.saturation)
     uart.checking = False
# ---- Esempio n. 14 (Example 14) ----
# 0
def face_recog(calc_time):
    """Detect a face, snapshot it, and identify the person via LBP matching.

    Compares the snapshot's LBP descriptor against every reference .pgm in
    /Faces (filenames like "name_*.pgm"), averages the match distance per
    name, and prints the name with the smallest average distance.

    calc_time: overall time budget in milliseconds.
    Relies on module-level globals: sensor, image, pyb, uos.
    """
    pin = pyb.millis()  # start timestamp for the time budget
    print(pin)
    print(calc_time)
    cc = 0  # NOTE(review): never used
    #print(pyb.elapsed_millis(pin))
    while (pyb.elapsed_millis(pin)) < calc_time:
        print("top of face recog function")
        #snapshot on face detection
        RED_LED_PIN = 1
        BLUE_LED_PIN = 3
        sensor.reset()  # Initialize the camera sensor.
        sensor.set_contrast(3)
        sensor.set_gainceiling(16)
        sensor.set_pixformat(sensor.GRAYSCALE)
        sensor.set_framesize(sensor.HQVGA)  # or sensor.QQVGA (or others)
        sensor.skip_frames(time=2000)  # Let new settings take affect.
        face_cascade = image.HaarCascade("frontalface", stages=25)
        uos.chdir("/")
        pyb.LED(RED_LED_PIN).on()
        print("About to start detecting faces...")
        sensor.skip_frames(time=2000)  # Give the user time to get ready.
        pyb.LED(RED_LED_PIN).off()
        print("Now detecting faces!")
        pyb.LED(BLUE_LED_PIN).on()
        diff = 10  # We'll say we detected a face after 10 frames.
        #if (pyb.elapsed_millis(pin)) > calc_time:
        #continue
        try:
            while (diff):
                img = sensor.snapshot()
                faces = img.find_features(face_cascade,
                                          threshold=0.5,
                                          scale_factor=1.5)
                if faces:
                    diff -= 1
                    for r in faces:
                        img.draw_rectangle(r)
                elif (pyb.elapsed_millis(pin)) > calc_time:
                    raise Exception
        except Exception as e:
            print("Get Out Exception called")

        # NOTE(review): execution falls through here even when the budget
        # expired without a detection — the recognition below runs anyway.
        pyb.LED(BLUE_LED_PIN).off()
        print("Face detected! Saving image...")
        pic_name = "snapshot-person.pgm"
        sensor.snapshot().save(
            pic_name)  # Save Pic. to root of SD card -- uos.chdir("/")
        pyb.delay(100)
        # LBP descriptor of the captured face (ellipse mask drops corners).
        snap_img = image.Image(pic_name, copy_to_fb=True).mask_ellipse()
        d0 = snap_img.find_lbp((0, 0, snap_img.width(), snap_img.height()))
        # face recognition
        pyb.LED(2).on()
        # Alternating flat list: [name, distance, name, distance, ...]
        name_lbp_list = []
        uos.chdir(
            "/Faces"
        )  # change directory to where all the webex photos from tcp are stored
        for filename in uos.listdir("/Faces"):
            if filename.endswith(".pgm"):
                try:
                    img = None
                    img = image.Image(filename, copy_to_fb=True).mask_ellipse()
                    d1 = img.find_lbp((0, 0, img.width(), img.height()))
                    dist = image.match_descriptor(d0, d1, 50)
                    #print("weve matched")
                    word = filename
                    #print(filename)
                    # The person's name is the filename up to the first '_'.
                    und_loc = word.index('_')
                    word = word[0:(und_loc)]
                    name_lbp_list.append(word)
                    name_lbp_list.append(dist)
                    continue
                except Exception as e:
                    print(e)
                    print("error reading file")
            else:
                print("ERROR")
        print(name_lbp_list)
        #print(len(name_lbp_list))
        # Average the distances for each consecutive run of the same name.
        end = 0
        name_avg = []
        i = 0
        start = 0
        while i < len(name_lbp_list):
            if ((i + 2) < len(name_lbp_list)) and (name_lbp_list[i] !=
                                                   name_lbp_list[i + 2]):
                end = i + 2
                #print(start)
                #print(end)
                face = []
                face = name_lbp_list[start:end]
                print(face)
                j = 1
                sum_lbp = 0
                while j < len(face):
                    sum_lbp += face[j]
                    j += 2
                name_avg.append(face[0])
                name_avg.append(sum_lbp / (len(face) / 2))
                start = i + 2
            i += 2
        # Handle the final run of names (no name change follows it).
        face = []
        face = name_lbp_list[(end):(len(name_lbp_list))]
        print(face)
        j = 1
        sum_lbp = 0
        while j < len(face):
            sum_lbp += face[j]
            j += 2
        name_avg.append(face[0])
        name_avg.append(sum_lbp / (len(face) / 2))
        print(name_avg)
        # Pull out just the averaged distances; the smallest is the best match.
        lbps = []
        k = 1
        while k < len(name_avg):
            lbps.append(name_avg[k])
            k += 2
        print(lbps)
        #print(len(lbps))
        min_lbp = min(lbps)
        print(min_lbp)
        ind = lbps.index(min(lbps))
        #print(ind)
        ind += 1
        found_person = name_avg[2 * ind - 2]
        id_name = "The person you are looking at is: " + found_person
        print(id_name)
        #delete snapshot of person
        uos.remove("/snapshot-person.pgm")
        pyb.LED(2).off()
# Line-following script fragment.
# NOTE(review): references THRESHOLD, LED, Pin, sensor, time defined outside
# this view, and the while-loop body appears truncated (theta_err is assigned
# but the steering logic that would use it is cut off).
LED(2).on()
LED(3).on()
pin1 = Pin('P1', Pin.IN, Pin.PULL_UP)
pin2 = Pin('P2', Pin.IN, Pin.PULL_UP)
pin3 = Pin('P3', Pin.IN, Pin.PULL_UP)
old_L = 0
old_R = 0

clock = time.clock()
sensor.reset()
#sensor.set_vflip(True)
#sensor.set_hmirror(True)
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(
    sensor.QQQVGA)  # 80x60 (4,800 pixels) - O(N^2) max = 23,040,000.
sensor.set_gainceiling(2)
sensor.set_auto_exposure(False, exposure_us=2500)  # fixed exposure rate
sensor.set_contrast(+3)
sensor.set_saturation(-1)
sensor.set_brightness(-3)

while (True):
    if pin1.value():
        clock.tick()
        img = sensor.snapshot().binary([THRESHOLD])
        #img = sensor.snapshot()
        # Fit a robust regression line through the thresholded pixels.
        line = img.get_regression([(-100, -100, 0, 0, 0, 0)], robust=True)
        if (line):
            # Lateral offset of the line from the image centre.
            rho_err = abs(line.rho()) - img.width() / 2
            if line.theta() > 90:
                theta_err = line.theta() - 180
# ---- Esempio n. 16 (Example 16) ----
# 0
# Color-bar self-test script (fragment — the sampling/verification loop that
# follows this setup is cut off in this view).
import sensor, time

sensor.reset()
# Set sensor settings
sensor.set_brightness(0)
sensor.set_saturation(0)
sensor.set_gainceiling(8)
sensor.set_contrast(2)

# Set sensor pixel format
sensor.set_framesize(sensor.QVGA)
sensor.set_pixformat(sensor.RGB565)

# Enable colorbar test mode
sensor.set_colorbar(True)

# Skip a few frames to allow the sensor to settle down.
# NOTE(review): the name `image` shadows any imported `image` module.
for i in range(0, 30):
    image = sensor.snapshot()

# Color bars thresholds — one predicate per bar, left to right
t = [lambda r, g, b: r < 50  and g < 50  and b < 50,   # Black
     lambda r, g, b: r < 50  and g < 50  and b > 200,  # Blue
     lambda r, g, b: r > 200 and g < 50  and b < 50,   # Red
     lambda r, g, b: r > 200 and g < 50  and b > 200,  # Purple
     lambda r, g, b: r < 50  and g > 200 and b < 50,   # Green
     lambda r, g, b: r < 50  and g > 200 and b > 200,  # Aqua
     lambda r, g, b: r > 200 and g > 200 and b < 50,   # Yellow
     lambda r, g, b: r > 200 and g > 200 and b > 200]  # White

#320x240 image with 8 color bars each one is approx 40 pixels.
# ---- Esempio n. 17 (Example 17) ----
# 0
# Edge detection with the Canny algorithm.
#
# This example demonstrates the Canny edge detector.
#
# Translation: 01Studio

import sensor, image, time

# Initialize the camera.
sensor.reset()  # Initialize the camera module.
sensor.set_pixformat(sensor.GRAYSCALE)  # or use sensor.RGB565 for color
sensor.set_framesize(sensor.QQVGA)  # or use sensor.QVGA (or others)
sensor.skip_frames(time=2000)  # give the camera time to stabilize
sensor.set_gainceiling(8)  # gain ceiling (officially recommended value)

clock = time.clock()  # Tracks FPS.

while (True):

    clock.tick()  # used to compute FPS (frames per second)
    img = sensor.snapshot()  # capture and return an image

    # Run the Canny edge detector.
    img.find_edges(image.EDGE_CANNY, threshold=(50, 80))

    # A simpler, faster edge detector is also available (lower quality):
    #img.find_edges(image.EDGE_SIMPLE, threshold=(100, 255))

    print(clock.fps())  # print FPS (frames per second)
# ---- Esempio n. 18 (Example 18) ----
# 0
# Color-tracking setup (fragment — the main loop body is outside this view).
# NOTE(review): radius/buttColor/headColor/fps usage is not visible here;
# presumably blob geometry / color codes for the tracking loop — confirm.
radius = 40
windowX = 240  # sensor window width
windowY = 240  # sensor window height
buttColor = 4
headColor = 1
fps = 120

#SETUP
sensor.reset()
sensor.set_hmirror(True)
sensor.set_vflip(True)
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.set_windowing((windowX, windowY)) # 240x240 center pixels of VGA
sensor.set_auto_gain(False, gain_db = 20) # must be turned off for color tracking
sensor.set_gainceiling(128)
sensor.set_auto_whitebal(False, rgb_gain_db = (-6.0, -3.0, 2)) # must be turned off for color tracking
#sensor.set_brightness(-1)
sensor.set_saturation(3)
#sensor.set_quality(100)
#sensor.set_auto_exposure(False, 1000)
#sensor.set_contrast(3)
sensor.skip_frames(time = 2000)
clock = time.clock()

kernel_size = 1 # 3x3==1, 5x5==2, 7x7==3, etc.
# 3x3 convolution kernel (row-major).
kernel = [-2, -1,  0, \
          -1,  6,  -1, \
           0,  -1,  -2]

while(True):
# ---- Esempio n. 19 (Example 19) ----
# 0
# Capture a JPEG snapshot and write it to /test.jpeg.
import sensor, pyb, time

# Reset sensor
sensor.reset()

# Set sensor settings
sensor.set_brightness(0)
sensor.set_saturation(0)
sensor.set_gainceiling(16)
sensor.set_contrast(1)
sensor.set_framesize(sensor.QVGA)

# Enable JPEG and set quality
sensor.set_pixformat(sensor.JPEG)
sensor.set_quality(98)

# Red LED
led = pyb.LED(1)

# Skip a few frames to allow the sensor settle down
# Note: This takes more time when exec from the IDE.
for i in range(0, 30):
    sensor.snapshot()

# Turn on red LED and wait for a second
# NOTE(review): time.sleep takes seconds in CPython / modern MicroPython,
# so sleep(1000) waits ~1000 s, not 1 s — confirm the target firmware.
led.on()
time.sleep(1000)

# Write JPEG image to file. JPEG is binary data, so the file must be
# opened in binary mode ("wb", was "w") to avoid newline translation
# corrupting the stream.
with open("/test.jpeg", "wb") as f:
    f.write(sensor.snapshot())
# ---- Esempio n. 20 (Example 20) ----
# 0
#************************************ (C) COPYRIGHT 2019 ANO ***********************************#
import sensor, image, time, math, struct, lcd
import json
from pyb import LED, Timer
from struct import pack, unpack
# Initialize the camera.
sensor.reset()
sensor.set_pixformat(sensor.RGB565)  # pixel format of the camera module
sensor.set_framesize(sensor.QVGA)  # camera resolution (QVGA = 320x240)
sensor.skip_frames(time=3000)  # let the sensor settle
sensor.set_auto_whitebal(False)  # white balance must be off for color tracking
clock = time.clock()  # FPS clock

sensor.set_contrast(1)  # camera image contrast: -3 .. +3
sensor.set_gainceiling(16)  # camera gain ceiling: 2, 4, 8, 16, 32, 64, 128
#lcd.init()
# Main loop


class Recognition(object):
    """Simple state holder for a recognition result.

    flag/color: detection state and color code; cx/cy: blob centroid
    (presumably — usage is outside this block; confirm with the main loop).
    """

    # All fields start zeroed and are overwritten at runtime.
    flag = color = cx = cy = 0


# NOTE(review): rebinds the name to a singleton instance, shadowing the
# class — no further instances can be created after this line.
Recognition = Recognition()
# Red color threshold (6-tuple of channel ranges)
red_threshold = (40, 91, 34, 127, -60, 96)
# Green color threshold (6-tuple of channel ranges)
green_threshold = (42, 100, -84, -26, -2, 108)
# ---- Esempio n. 21 (Example 21) ----
# 0
# (Comment truncated by extraction:) ...the threshold, the higher the number
# of keypoints extracted.
KEYPOINTS_THRESH=30
# Keypoint-level threshold, range from 0 to 100.
# This threshold is used when matching two keypoint descriptors, it's the
# percentage of the distance between two descriptors to the max distance.
# In other words, the minimum matching percentage between 2 keypoints.
MATCHING_THRESH=70

#greenled = pyb.LED(1)
#blueled = pyb.LED(2)
# Reset sensor
sensor.reset()

# Sensor settings
sensor.set_contrast(1)
sensor.set_gainceiling(16)
sensor.set_framesize(sensor.HQVGA)
sensor.set_pixformat(sensor.GRAYSCALE)

# Skip a few frames to allow the sensor settle down
# Note: This takes more time when exec from the IDE.
#for i in range(0, 10):
#    img = sensor.snapshot()
#    img.draw_string(0, 0, "Please wait...")

# Load Haar Cascade
# By default this will use all stages, lower stages is faster but less accurate.
face_cascade = image.HaarCascade("frontalface", stages=25)
print(face_cascade)

# First set of keypoints
# Face detection works by running a Haar Cascade feature detector over the
# image. A Haar Cascade is a series of simple region contrast checks.
# The face cascade has 25 stages with several hundred checks per stage; it
# runs fast because detection proceeds stage by stage. The OpenMV Cam also
# uses a data structure called an integral image to perform each region
# contrast check in constant time (grayscale is required because the
# integral image needs the extra space).
#
# Translation and comments: 01Studio

import sensor, time, image

# Reset sensor
sensor.reset()

# Sensor settings
sensor.set_contrast(1)  # set camera image contrast to 1
sensor.set_gainceiling(16)  # set camera gain ceiling to 16
# HQVGA and GRAYSCALE are the best settings for face detection.
sensor.set_framesize(sensor.HQVGA)
sensor.set_pixformat(sensor.GRAYSCALE)

# Load the Haar Cascade model.
# Uses 25 stages by default; fewer stages is faster but less accurate.
face_cascade = image.HaarCascade("frontalface", stages=25)
print(face_cascade)

# FPS clock
clock = time.clock()

while (True):
    clock.tick()