Example #1
import sensor, image, time
import cam5procs  # project-specific helper module (logging, LEDs, packets)

def runIt():
    print("track_eye")
    cam5procs.logLine("track_eye")
    clock = time.clock()
    framecount = 0
    ledState = 0
    ledCounter = 0
    sensor.set_framesize(sensor.HQVGA)
    sensor.set_pixformat(sensor.GRAYSCALE)
    face_cascade = image.HaarCascade("frontalface", stages=25)
    eyes_cascade = image.HaarCascade("eye", stages=24)
    #print(face_cascade,eyes_cascade)
    while not cam5procs.receive_packet():
        ledCounter += 1
        if ((ledCounter % 5) == 0):
            if (ledState == 0):
                ledState = 1
                cam5procs.ledShowColour([0, 0, 255])
            else:
                ledState = 0
                cam5procs.ledShowColour([0, 0, 0])
        clock.tick()
        img = sensor.snapshot()  # Capture snapshot
        # Find faces.
        # Note: Lower scale factor scales-down the image more and detects smaller objects.
        # Higher threshold results in a higher detection rate, with more false positives.
        faces = img.find_features(face_cascade, threshold=0.75, scale_factor=1.35)
        # Draw objects
        framecount += 1
        for face in faces[:4]:
            img.draw_rectangle(face)
            # Now find eyes within each face.
            # Note: Use a higher threshold here (more detections) and lower scale (to find small objects)
            eyes = img.find_features(eyes_cascade,
                                     threshold=0.5,
                                     scale_factor=1.2,
                                     roi=face)
            for e in eyes[:2]:
                img.draw_rectangle(e)
                tracked = [
                    framecount & 0xff, 0, 0, e[0], e[1], e[0] + e[2],
                    e[1] + e[3]
                ]
                #print("fps: ", clock.fps(),tracked)
                cam5procs.send_packet(tracked, 7, cam5procs.TRK_BLOB)
    # Print FPS.
    # Note: Actual FPS is higher, streaming the FB makes it slower.
    return True
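The tracked payload above packs a frame counter plus the eye's bounding box as two corner points. Below is a minimal sketch (not part of the original source) of decoding such a 7-value payload back into the (x, y, w, h) rect convention used by find_features(); cam5procs' wire format is an assumption here:

def decode_trk_blob(payload):
    # Payload layout used above: [frame, 0, 0, x0, y0, x1, y1]
    frame, _, _, x0, y0, x1, y1 = payload
    # Convert the corner pair back to an (x, y, w, h) rect.
    return frame, (x0, y0, x1 - x0, y1 - y0)

frame, rect = decode_trk_blob([7, 0, 0, 40, 30, 72, 62])
print(frame, rect)  # 7 (40, 30, 32, 32)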
Example #2
import sensor, image, time

def FaceTest():
    sensor.reset()

    # Sensor settings
    sensor.set_contrast(1)
    sensor.set_gainceiling(16)
    # HQVGA and GRAYSCALE are the best for face tracking.

    sensor.set_framesize(sensor.VGA)
    sensor.set_windowing((320, 240))
    #sensor.set_framesize(sensor.QVGA)

    sensor.set_pixformat(sensor.GRAYSCALE)
    #sensor.set_framerate(2<<9|3<<11)
    # Load Haar Cascade
    # By default this will use all stages; fewer stages is faster but less accurate.
    face_cascade = image.HaarCascade("frontalface", stages=25)
    print(face_cascade)
    clock = time.clock()
    for i in range(250):
        clock.tick()
        img = sensor.snapshot()
        objects = img.find_features(face_cascade,
                                    threshold=0.75,
                                    scale_factor=1.25)
        fID = 0
        for r in objects:
            img.draw_rectangle(r, color=(0, 0, 0), thickness=3)
            #img.draw_rectangle(r[0], r[1], 48, 10, fill=True, color=(0,0,0))
            fID += 1
            s = 'face %d' % (fID)
            img.draw_string(r[0], r[1], s)
        print(clock.fps())
Example #3
import sensor, image, struct

def face_detection(data):
    sensor.set_pixformat(sensor.GRAYSCALE)
    sensor.set_framesize(sensor.QVGA)
    faces = sensor.snapshot().gamma_corr(contrast=1.5).find_features(image.HaarCascade("frontalface"))
    if not faces: return bytes() # No detections.
    for f in faces: sensor.get_fb().draw_rectangle(f, color = (255, 255, 255))
    out_face = max(faces, key = lambda f: f[2] * f[3])
    return struct.pack("<HHHH", out_face[0], out_face[1], out_face[2], out_face[3])
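The function returns the largest face packed as four little-endian uint16 values, or empty bytes for no detection. A hedged sketch of the matching receiver-side unpack:

import struct

def unpack_face(data):
    if not data:
        return None  # empty bytes() means no detection this frame
    # Mirror of struct.pack("<HHHH", x, y, w, h) above.
    return struct.unpack("<HHHH", data)

print(unpack_face(struct.pack("<HHHH", 10, 20, 48, 48)))  # (10, 20, 48, 48)
print(unpack_face(bytes()))                               # None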
Example #4
import sensor, image, pyb, uos, gc

def face_recog(calc_time, vi_ip):
    pin = pyb.millis()
    print(pin)
    print(calc_time)
    cc = 0
    #pyb.elapsed_millis(start)
    while pyb.elapsed_millis(pin) < calc_time:
        print("top of face recog function")
        #snapshot on face detection
        RED_LED_PIN = 1
        BLUE_LED_PIN = 3
        sensor.reset()  # Initialize the camera sensor.
        sensor.set_contrast(3)
        sensor.set_gainceiling(16)
        sensor.set_pixformat(sensor.GRAYSCALE)
        sensor.set_framesize(sensor.HQVGA)  # or sensor.QQVGA (or others)
        #sensor.alloc_extra_fb()
        sensor.skip_frames(time=2000)  # Let new settings take effect.
        face_cascade = image.HaarCascade("frontalface", stages=25)
        uos.chdir("/")
        pyb.LED(RED_LED_PIN).on()
        print("About to start detecting faces...")
        sensor.skip_frames(time=2000)  # Give the user time to get ready.
        pyb.LED(RED_LED_PIN).off()
        print("Now detecting faces!")
        pyb.LED(BLUE_LED_PIN).on()
        diff = 10  # We'll say we detected a face after 10 frames.
        try:
            while (diff):
                img = sensor.snapshot()
                sensor.alloc_extra_fb(img.width(), img.height(),
                                      sensor.GRAYSCALE)
                faces = img.find_features(face_cascade,
                                          threshold=0.5,
                                          scale_factor=1.5)
                sensor.dealloc_extra_fb()
                if faces:
                    diff -= 1
                    for r in faces:
                        img.draw_rectangle(r)
                elif (pyb.elapsed_millis(pin)) > calc_time:
                    raise Exception
            pyb.LED(BLUE_LED_PIN).off()
            print("Face detected! Saving image...")
            pic_name = "snapshot-person.pgm"
            sensor.snapshot().save(pic_name)  # Save pic to root of SD card -- uos.chdir("/")
            pyb.delay(100)
            facial_recog(pic_name, vi_ip)
            gc.collect()
        except Exception as go:
            print("we are in exception")
            pyb.LED(BLUE_LED_PIN).off()
            gc.collect()
Example #5
def unittest(data_path, temp_path):
    import image
    # Load Haar Cascade
    cascade = image.HaarCascade(data_path + "/frontalface.cascade")

    # Load the test image
    img = image.Image(data_path + "/dennis.pgm", copy_to_fb=True)

    # Find objects
    objects = img.find_features(cascade, threshold=0.75, scale_factor=1.25)
    return (objects and objects[0] == (189, 53, 88, 88)
            and objects[1] == (12, 11, 107, 107))
Example #6
import sensor, image, pyb, uos, gc

def face_detect(init_start, calc_time):
    print("~~~~~~~~~~~~~~~~FACE_DETECT~~~~~~~~~~~~~~~~~~~~~~")
    gc.collect()  #garbage collection
    while pyb.elapsed_millis(init_start) < calc_time:  #while time not expired
        #snapshot on face detection
        RED_LED_PIN = 1
        BLUE_LED_PIN = 3
        sensor.reset()  # Initialize the camera sensor.
        sensor.set_contrast(3)  #set to highest contrast setting
        sensor.set_gainceiling(16)
        sensor.set_pixformat(sensor.GRAYSCALE)  # grayscale for facial recognition
        sensor.set_framesize(sensor.HQVGA)
        sensor.skip_frames(time=2000)  # Let new settings take effect.
        face_cascade = image.HaarCascade("frontalface", stages=25)  # frontal-face Haar cascade classifier
        uos.chdir("/")
        pyb.LED(RED_LED_PIN).on()
        print("About to start detecting faces...")
        sensor.skip_frames(time=2000)  # Give the user time to get ready.
        pyb.LED(RED_LED_PIN).off()
        print("Now detecting faces!")
        pyb.LED(BLUE_LED_PIN).on()
        diff = 10  # We'll say we detected a face after 10 frames.
        try:
            while (diff):
                img = sensor.snapshot()
                sensor.alloc_extra_fb(img.width(), img.height(), sensor.GRAYSCALE)  # allocate an extra frame buffer
                faces = img.find_features(face_cascade, threshold=0.5,
                                          scale_factor=1.5)  # detect face features
                sensor.dealloc_extra_fb()
                if faces:
                    diff -= 1
                    for r in faces:
                        img.draw_rectangle(r)
                elif pyb.elapsed_millis(init_start) > calc_time:  # if time has expired, leave the function
                    raise Exception
            pyb.LED(BLUE_LED_PIN).off()
            print("Face detected! Saving image...")
            pic_name = "snapshot-person.pgm"
            sensor.snapshot().save(pic_name)  # Save Pic. to root of SD card
            pyb.delay(100)
            gc.collect()  #garbage collection
            return pic_name
        except Exception as go:
            print("exception - time expired")
            pyb.LED(BLUE_LED_PIN).off()
            gc.collect()  #garbage collection
Example #7
class postProcess:
    faceCascade = image.HaarCascade('frontalface')

    @staticmethod
    def applyFilter(fid, img):
        f = [None,
             lambda i: i.gaussian(1),
             lambda i: i.histeq(),
             lambda i: i.mean(1),
             lambda i: i.mean(1),
             lambda i: i.erode(2),
             lambda i: i.dilate(2),
             lambda i: i.chrominvar() if i.format() == 2 else i,
             lambda i: i.illuminvar() if i.format() == 2 else i]
        return f[fid](img)

    @staticmethod
    def detectFace(img):
        # find_features() returns a list of rects; draw each one.
        faces = img.find_features(postProcess.faceCascade, 0.9)
        for r in faces:
            img.draw_rectangle(r)
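A short usage sketch for the class above, assuming the sensor is already configured as in the other examples (filter id 2 selects the histeq() lambda in applyFilter):

img = sensor.snapshot()
img = postProcess.applyFilter(2, img)  # histogram equalization
postProcess.detectFace(img)            # draws a rectangle on each detected face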
Example #8
import sensor, image, pyb, uos, gc

def face_detect(init_start, calc_time):
    print("~~~~~~~~~~~~~~~~FACE_DETECT~~~~~~~~~~~~~~~~~~~~~~")
    gc.collect()
    while pyb.elapsed_millis(init_start) < calc_time:
        RED_LED_PIN = 1
        BLUE_LED_PIN = 3
        sensor.reset()
        sensor.set_contrast(3)
        sensor.set_gainceiling(16)
        sensor.set_pixformat(sensor.GRAYSCALE)
        sensor.set_framesize(sensor.HQVGA)
        sensor.skip_frames(time=2000)
        face_cascade = image.HaarCascade("frontalface", stages=25)
        uos.chdir("/")
        pyb.LED(RED_LED_PIN).on()
        print("About to start detecting faces...")
        sensor.skip_frames(time=2000)
        pyb.LED(RED_LED_PIN).off()
        print("Now detecting faces!")
        pyb.LED(BLUE_LED_PIN).on()
        diff = 10
        try:
            while (diff):
                img = sensor.snapshot()
                sensor.alloc_extra_fb(img.width(), img.height(),
                                      sensor.GRAYSCALE)
                faces = img.find_features(face_cascade,
                                          threshold=0.5,
                                          scale_factor=1.5)
                sensor.dealloc_extra_fb()
                if faces:
                    diff -= 1
                    for r in faces:
                        img.draw_rectangle(r)
                elif (pyb.elapsed_millis(init_start)) > calc_time:
                    raise Exception
            pyb.LED(BLUE_LED_PIN).off()
            print("Face detected! Saving image...")
            pic_name = "snapshot-person.pgm"
            sensor.snapshot().save(pic_name)
            pyb.delay(100)
            gc.collect()
            return pic_name
        except Exception as go:
            print("exception - time expired")
            pyb.LED(BLUE_LED_PIN).off()
            gc.collect()
Example #9
import sensor, image, time

def FaceTest(loopCnt=220, barLen=120):
    sensor.reset()

    # Sensor settings
    sensor.set_contrast(1)
    #sensor.set_gainceiling(16)
    # HQVGA and GRAYSCALE are the best for face tracking.

    #sensor.set_framesize(sensor.VGA)
    #sensor.set_windowing((320,240))
    sensor.set_framesize(sensor.VGA)
    sensor.set_windowing((320, 240))
    sensor.set_pixformat(sensor.GRAYSCALE)
    #sensor.set_auto_gain(False)
    #sensor.set_auto_whitebal(True) # must be turned off for color tracking
    # Load Haar Cascade
    # By default this will use all stages; fewer stages is faster but less accurate.
    face_cascade = image.HaarCascade("frontalface", stages=25)
    print(face_cascade)
    clock = time.clock()
    avg = 0.0
    startTick = time.ticks()
    while (True):
        if time.ticks() - startTick > loopCnt:
            break
        clock.tick()
        img = sensor.snapshot()
        img.draw_string(4, 4, 'Face Detect', color=(0, 0, 0))
        t0 = time.ticks()
        objects = img.find_features(face_cascade,
                                    threshold=0.75,
                                    scale_factor=1.25)
        t1 = time.ticks() - t0
        avg = avg * 0.90 + t1 * 0.10
        fID = 0
        lnLen = (barLen * (loopCnt - (time.ticks() - startTick))) // loopCnt
        DrawPgsBar(img, barLen, loopCnt, startTick)
        for r in objects:
            img.draw_rectangle(r, thickness=3)
            img.draw_rectangle(r[0], r[1], 48, 10, fill=True)
            fID += 1
            s = 'face %d' % (fID)
            img.draw_string(r[0], r[1], s, color=(0, 0, 0))
        print('algo time cost : %.2f ms' % (avg))
Example #10
import sensor, image, time
import cam5procs  # project-specific helper module (logging, LEDs, packets)

def runIt():
    print("track_face")
    cam5procs.logLine("track_face")
    clock = time.clock()
    ledState = 0
    ledCounter = 0
    framecount = 0
    sensor.set_framesize(sensor.HQVGA)
    sensor.set_pixformat(sensor.GRAYSCALE)
    face_cascade = image.HaarCascade("frontalface", stages=25)
    print(face_cascade)
    while not cam5procs.receive_packet():
        ledCounter += 1
        if ((ledCounter % 5) == 0):
            if (ledState == 0):
                ledState = 1
                cam5procs.ledShowColour([0, 0, 255])
            else:
                ledState = 0
                cam5procs.ledShowColour([0, 0, 0])
        clock.tick()
        img = sensor.snapshot()  # Capture snapshot
        # Find faces.
        # Note: Lower scale factor scales-down the image more and detects smaller objects.
        # Higher threshold results in a higher detection rate, with more false positives.
        faces = img.find_features(face_cascade, threshold=0.75, scale_factor=1.35)
        # Draw objects
        framecount += 1
        for r in faces[:8]:
            img.draw_rectangle(r)
            tracked = [
                framecount & 0xff, 0, 0, r[0], r[1], r[0] + r[2], r[1] + r[3]
            ]
            cam5procs.send_packet(tracked, 7, cam5procs.TRK_BLOB)
    # Print FPS.
    # Note: Actual FPS is higher, streaming the FB makes it slower.
    return True
Example #11
import sensor, image, time, tf

sensor.set_framesize(sensor.QVGA)
sensor.skip_frames(time=2000)

clock = time.clock()

net = tf.load("trained.tflite", load_to_fb=True)
labels = [l.rstrip('\n') for l in open("labels.txt")]

while (True):
    clock.tick()

    # Take a picture and brighten things up for the frontal face detector.
    img = sensor.snapshot().gamma_corr(contrast=1.5)

    # Returns a list of rects (x, y, w, h) where faces are.
    faces = img.find_features(image.HaarCascade("frontalface"))

    for f in faces:

        # Classify a face and get the class scores list
        scores = net.classify(img, roi=f)[0].output()

        # Find the highest class score and lookup the label for that
        label = labels[scores.index(max(scores))]

        # Draw a box around the face
        img.draw_rectangle(f)

        # Draw the label above the face
        img.draw_string(f[0] + 3, f[1] - 1, label, mono_space=False)
Example #12
import sensor, image, pyb, uos

def face_recog(calc_time):
    pin = pyb.millis()
    print(pin)
    cc = 0
    #pyb.elapsed_millis(start)
    while pyb.elapsed_millis(pin) < calc_time:
        print("top of face recog function")
        #snapshot on face detection
        RED_LED_PIN = 1
        BLUE_LED_PIN = 3
        sensor.reset()  # Initialize the camera sensor.
        sensor.set_pixformat(sensor.GRAYSCALE)
        sensor.set_framesize(sensor.HQVGA)  # or sensor.QQVGA (or others)
        sensor.skip_frames(time=2000)  # Let new settings take effect.
        face_cascade = image.HaarCascade("frontalface", stages=25)
        uos.chdir("/")
        pyb.LED(RED_LED_PIN).on()
        print("About to start detecting faces...")
        sensor.skip_frames(time=2000)  # Give the user time to get ready.
        pyb.LED(RED_LED_PIN).off()
        print("Now detecting faces!")
        pyb.LED(BLUE_LED_PIN).on()
        diff = 10  # We'll say we detected a face after 10 frames.
        while (diff):
            img = sensor.snapshot()
            faces = img.find_features(face_cascade,
                                      threshold=0.5,
                                      scale_factor=1.5)
            if faces:
                diff -= 1
                for r in faces:
                    img.draw_rectangle(r)
        pyb.LED(BLUE_LED_PIN).off()
        print("Face detected! Saving image...")
        pic_name = "snapshot-person.pgm"
        sensor.snapshot().save(pic_name)  # Save pic to root of SD card -- uos.chdir("/")
        pyb.delay(100)
        snap_img = image.Image(pic_name).mask_ellipse()
        d0 = snap_img.find_lbp((0, 0, snap_img.width(), snap_img.height()))
        # face recognition
        pyb.LED(2).on()
        name_lbp_list = []
        uos.chdir("/Faces")  # change to the directory where all the webex photos from tcp are stored
        for filename in uos.listdir("/Faces"):
            if filename.endswith(".pgm"):
                try:
                    img = None
                    img = image.Image(filename).mask_ellipse()
                    d1 = img.find_lbp((0, 0, img.width(), img.height()))
                    dist = image.match_descriptor(d0, d1, 50)
                    word = filename
                    #print(filename)
                    und_loc = word.index('_')
                    word = word[0:(und_loc)]
                    name_lbp_list.append(word)
                    name_lbp_list.append(dist)
                    continue
                except Exception as e:
                    print(e)
                    print("error reading file")
            else:
                print("ERROR")
        print(name_lbp_list)
        #print(len(name_lbp_list))
        end = 0
        name_avg = []
        i = 0
        start = 0
        while i < len(name_lbp_list):
            if ((i + 2) < len(name_lbp_list)) and (name_lbp_list[i] !=
                                                   name_lbp_list[i + 2]):
                end = i + 2
                #print(start)
                #print(end)
                face = []
                face = name_lbp_list[start:end]
                print(face)
                j = 1
                sum_lbp = 0
                while j < len(face):
                    sum_lbp += face[j]
                    j += 2
                name_avg.append(face[0])
                name_avg.append(sum_lbp / (len(face) / 2))
                start = i + 2
            i += 2
        face = []
        face = name_lbp_list[(end):(len(name_lbp_list))]
        print(face)
        j = 1
        sum_lbp = 0
        while j < len(face):
            sum_lbp += face[j]
            j += 2
        name_avg.append(face[0])
        name_avg.append(sum_lbp / (len(face) / 2))
        print(name_avg)
        lbps = []
        k = 1
        while k < len(name_avg):
            lbps.append(name_avg[k])
            k += 2
        print(lbps)
        #print(len(lbps))
        min_lbp = min(lbps)
        print(min_lbp)
        ind = lbps.index(min(lbps))
        #print(ind)
        ind += 1
        found_person = name_avg[2 * ind - 2]
        id_name = "The person you are looking at is: " + found_person
        print(id_name)
        #delete snapshot of person
        uos.remove("/snapshot-person.pgm")
        pyb.LED(2).off()
        cc += 1
        print(cc)
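The interleaved name/distance list and index arithmetic above are hard to follow. A dict-based sketch of the same per-name averaging, shown only to clarify the intent (same input layout, same winner):

def closest_name(name_lbp_list):
    # name_lbp_list alternates [name, dist, name, dist, ...]
    sums, counts = {}, {}
    for name, dist in zip(name_lbp_list[::2], name_lbp_list[1::2]):
        sums[name] = sums.get(name, 0) + dist
        counts[name] = counts.get(name, 0) + 1
    # The smallest average LBP distance is the best match.
    return min(sums, key=lambda n: sums[n] / counts[n])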
Example #13
def __init__(self):
    global_value.flag_disp_line = 1
    self.row = global_value.row
    global_value.row = global_value.row + 1
    self.face_cascade = image.HaarCascade("frontalface", stages=25)
Example #14
import sensor, image, time
from pyb import LED

red_led = LED(1)

# Reset sensor
sensor.reset()

# Sensor settings
sensor.set_contrast(1)
sensor.set_gainceiling(16)
sensor.set_framesize(sensor.HQVGA)
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.skip_frames()

# Load Haar Cascade
# By default this will use all stages; fewer stages is faster but less accurate.
face_cascade = image.HaarCascade("frontalface", stages=30)
eyes_cascade = image.HaarCascade("eye", stages=24)
print(face_cascade, eyes_cascade)

# FPS clock
clock = time.clock()


def circle_from_rect(rect):
    x = round(rect[2] / 2) + rect[0]
    y = round(rect[3] / 2) + rect[1]
    r = round(rect[3] / 2)
    return (x, y, r)


notFound = True
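The snippet is cut off here, but a quick usage sketch for circle_from_rect, assuming img and a face rect from find_features as in the other examples:

faces = img.find_features(face_cascade, threshold=0.75, scale_factor=1.35)
for r in faces:
    x, y, radius = circle_from_rect(r)
    img.draw_circle(x, y, radius)  # circle centered on the face rect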
Example #15
import sensor, time, image

# Reset sensor
sensor.reset()
sensor.set_contrast(1)
sensor.set_gainceiling(16)

# HQVGA and GRAYSCALE are the best for face tracking.
sensor.set_framesize(sensor.HQVGA)
sensor.set_pixformat(sensor.GRAYSCALE)

# Note: face detection only works on grayscale images.
# Load Haar Cascade
# By default this will use all stages; fewer stages is faster but less accurate.

face_cascade = image.HaarCascade('frontalface', stages=25)

# image.HaarCascade(path, stages=Auto) loads a Haar cascade model. A Haar model is a
# binary file; for a custom model, pass the path to the model file. The built-in
# models can also be used, e.g. "frontalface" for faces or "eye" for eyes.
# If stages is not passed, the default is used; a smaller stages value speeds up
# matching but lowers accuracy.
print(face_cascade)

clock = time.clock()
while (True):
    clock.tick()

    # Capture snapshot
    img = sensor.snapshot()

    # Find objects.
Example #16
# Keypoint extractor threshold, range from 0 to any number.
# This threshold is used when extracting keypoints, the lower
# the threshold the higher the number of keypoints extracted.
KEYPOINTS_THRESH = 3
# Keypoint-level threshold, range from 0 to 100.
# This threshold is used when matching two keypoint descriptors, it's the
# percentage of the distance between two descriptors to the max distance.
# In other words, the minimum matching percentage between 2 keypoints.
MATCHING_THRESH = 85

# Number of maximum keypoints
KEYPOINTS_MAX = 80

# Load Haar Cascade
# By default this will use all stages, lower satges is faster but less accurate.
face_cascade = image.HaarCascade("frontalface")

# First set of keypoints
leo_kpts = None
andy_kpts = None
taiga_kpts = None
aya_kpts = None

# load faces!
img_andy = image.Image("/andy11.pgm", copy_to_fb=True)
andy_objects = img_andy.find_features(face_cascade, threshold=0.7, scale_factor=1.3)
if andy_objects:
    print("Andy's face loaded!")
    # Expand the ROI by 31 pixels in each direction (half the pattern scale)
    andy_face = (andy_objects[0][0] - 31, andy_objects[0][1] - 31,
                 andy_objects[0][2] + 31 * 2, andy_objects[0][3] + 31 * 2)
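The example is truncated here; given KEYPOINTS_THRESH, MATCHING_THRESH and the expanded face ROI, the matching step would plausibly continue as below. This is a sketch following OpenMV's find_keypoints()/match_descriptor() API; the live-capture step and the acceptance cutoff are assumptions:

# Extract reference keypoints from the expanded face ROI.
andy_kpts = img_andy.find_keypoints(threshold=KEYPOINTS_THRESH,
                                    max_keypoints=KEYPOINTS_MAX,
                                    roi=andy_face)

img = sensor.snapshot()
kpts = img.find_keypoints(threshold=KEYPOINTS_THRESH,
                          max_keypoints=KEYPOINTS_MAX,
                          normalized=True)
if andy_kpts and kpts:
    match = image.match_descriptor(andy_kpts, kpts, threshold=MATCHING_THRESH)
    if match.count() > 10:  # arbitrary acceptance cutoff
        img.draw_rectangle(match.rect())
        print("Andy matched with", match.count(), "keypoints")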
Example #17
import sensor, time, image, tf


def sort_preds(pred):
    return pred[1]


sensor.reset()

img = image.Image("/temp/licenseplate.bmp", copy_to_fb=True)

plates = image.HaarCascade("plate/cascade.cascade", stages=10)

zero_cascade = image.HaarCascade("plate/0_cascade.cascade", stages=10)
five_cascade = image.HaarCascade("plate/5_cascade.cascade", stages=10)
six_cascade = image.HaarCascade("plate/6_cascade.cascade", stages=10)
s_cascade = image.HaarCascade("plate/s_cascade.cascade", stages=10)
g_cascade = image.HaarCascade("plate/g_cascade.cascade", stages=10)

net = "/plate/trained.tflite"
labels = [line.rstrip('\n') for line in open("/plate/labels.txt")]

found_plate = img.find_features(plates, threshold=1, scale_factor=1.5)

for f in found_plate:

    (x, y, w, h) = f

    f = (x, y, w, int(h * 1.2))

    img.draw_rectangle(f)
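The snippet ends before the characters are classified. Presumably each digit/letter cascade would be run inside the plate ROI and the hits fed to the TFLite net, much like Example #11 does for faces; a hedged sketch of that step:

# Assumption: scan each character cascade inside the plate ROI and
# classify every hit with the loaded TFLite model.
for cascade in (zero_cascade, five_cascade, six_cascade, s_cascade, g_cascade):
    for d in img.find_features(cascade, threshold=0.75, scale_factor=1.25, roi=f):
        img.draw_rectangle(d)
        scores = tf.classify(net, img, roi=d)[0].output()
        print(labels[scores.index(max(scores))])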
Example #18
import sensor, image, time

#Reset sensor

sensor.reset()
sensor.set_contrast(3)
sensor.set_gainceiling(16)
sensor.set_framesize(sensor.VGA)

sensor.set_pixformat(sensor.GRAYSCALE)
sensor.skip_frames(time = 2000)

# Load Haar Cascade
# By default this will use all stages; fewer stages is faster but less accurate.
face_cascade = image.HaarCascade('frontalface', stages=25)
print(face_cascade)

kpts1 = None

while kpts1 is None:
    img = sensor.snapshot()
    img.draw_string(0, 0, "Looking for a face....")
    # Find faces
    objects = img.find_features(face_cascade, threshold=0.5, scale_factor=1.25)
    if objects:
        # Expand the ROI by 31 pixels in each direction
        face = (objects[0][0] - 31, objects[0][1] - 31,
                objects[0][2] + 31 * 2, objects[0][3] + 31 * 2)
        # Extract keypoints using the detected face size as the ROI
        kpts1 = img.find_keypoints(threshold=10, scale_factor=1.1, max_keypoints=100, roi=face)
        # Draw a rectangle around the first face
Example #19
# Translation and comments: 01Studio

import sensor, time, image

# Reset sensor
sensor.reset()

# Sensor settings
sensor.set_contrast(1)
sensor.set_gainceiling(16)
sensor.set_framesize(sensor.HQVGA)
sensor.set_pixformat(sensor.GRAYSCALE)

# Load the Haar Cascade model
# Use all stages by default for better detection accuracy.
face_cascade = image.HaarCascade("frontalface", stages=25)  # face model
eyes_cascade = image.HaarCascade("eye", stages=24)  # eye model
print(face_cascade, eyes_cascade)

# FPS clock
clock = time.clock()

while (True):
    clock.tick()

    # Capture snapshot
    img = sensor.snapshot()

    # Face detection
    objects = img.find_features(face_cascade, threshold=0.5, scale_factor=1.5)
Example #20
except:
    HAVE_LCD_MODULE = False
    print("lcd init error")

try:
    os.listdir('desc')
except:
    print('mkdir desc')
    os.mkdir('desc')
try:
    os.listdir('photo')
except:
    print('mkdir photo')
    os.mkdir('photo')

face_cascade = image.HaarCascade("frontalface", stages=HAAR_FACE_STAGES)


def loadFaceCascade():
    # Load Haar Cascade
    # By default this will use all stages; fewer stages is faster but less accurate.
    # face_cascade = image.HaarCascade("frontalface", stages=HAAR_FACE_STAGES)
    # # print(face_cascade)
    # return face_cascade
    global face_cascade
    return face_cascade


# https://github.com/opencv/opencv/tree/master/data/haarcascades
# https://github.com/openmv/openmv/blob/master/ml/haarcascade/cascade_convert.py
# https://github.com/atduskgreg/opencv-processing/tree/master/lib/cascade-files  # this data set is too large
Example #21
# Initialize the camera
sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.QVGA)
sensor.run(1)

# Set contrast
sensor.set_contrast(3)
# Set gain ceiling
sensor.set_gainceiling(16)

##################################################
# main
##################################################
# Load cascade (face)
cascade_frontalface = image.HaarCascade("frontalface")
# Load cascade (eyes)
cascade_eye = image.HaarCascade("eye")

while True:
    # Capture a camera frame
    img = sensor.snapshot()
    # Detect features (face)
    res_frontalface = img.find_features(cascade_frontalface)
    # If there are any results
    if res_frontalface:
        # Process every result
        for i in res_frontalface:
            print(i)
            # Draw a rectangle (face)
            img.draw_rectangle(i)
Example #22
import sensor, image, time

sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.QQVGA)
sensor.skip_frames(time = 2000)

clock = time.clock()

while(True):
    clock.tick()
    img = sensor.snapshot()
    #print(clock.fps())
    #eye_position = img.find_eye([0, 0, img.width(), img.height()])
    #img.draw_circle(eye_position[0], eye_position[1], 5)

    x = img.find_features(image.HaarCascade('eye'))
    for feature in x:
        print(feature)
        img.draw_rectangle(feature[0], feature[1], feature[2], feature[3])
        eye_position = img.find_eye(feature)
        print(eye_position)
        img.draw_circle(eye_position[0], eye_position[1], 5)


    #if x:
        #print(x)
        #img.draw_circle(x[0][0], x[0][1], 5)


Example #23
# 1. Start by loading in different formats (FOR QVGA - 320x240)
# TYPE - Terminal (GUI)
# Note: This script does not detect a face first, use it with the telephoto lens.

import sensor, time, image

# Reset sensor
sensor.reset()

# Sensor settings
sensor.set_contrast(1)
sensor.set_gainceiling(16)
sensor.set_framesize(sensor.QQVGA)
sensor.set_pixformat(sensor.GRAYSCALE)

# Load Haar Cascade
# By default this will use all stages; fewer stages is faster but less accurate.
eyes_cascade = image.HaarCascade("eye", stages=24)
print(eyes_cascade)

# FPS clock
clock = time.clock()

while (True):
    clock.tick()
    # Capture snapshot
    img = sensor.snapshot()
    # Find eyes !
    # Note: Lower scale factor scales-down the image more and detects smaller objects.
    # Higher threshold results in a higher detection rate, with more false positives.
    eyes = img.find_features(eyes_cascade, threshold=0.5, scale_factor=1.5)

    # Find iris
Example #24
# Use this script to gather face images for building a TensorFlow dataset. This script automatically
# zooms in the largest face in the field of view which you can then save using the data set editor.

import sensor, image, time

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.skip_frames(time=2000)

clock = time.clock()

largest_face = None
largest_face_timeout = 0

while (True):
    clock.tick()

    faces = sensor.snapshot().gamma_corr(contrast=1.5).find_features(
        image.HaarCascade("frontalface"))

    if faces:
        largest_face = max(faces, key=lambda f: f[2] * f[3])
        largest_face_timeout = 20

    if largest_face_timeout > 0:
        sensor.get_fb().crop(roi=largest_face)
        largest_face_timeout -= 1

    print(clock.fps())
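To bank the zoomed-in faces outside of the IDE's dataset editor, the timeout block could also save each crop to the SD card. A sketch with a hypothetical filename scheme (shots would be initialized before the while loop):

    if largest_face_timeout > 0:
        img = sensor.get_fb()
        img.crop(roi=largest_face)
        img.save("/face_%04d.jpg" % shots)  # hypothetical naming scheme
        shots += 1
        largest_face_timeout -= 1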
Example #25
import sensor, image

# Sensor settings
sensor.set_contrast(1)
sensor.set_gainceiling(16)
sensor.set_framesize(sensor.HQVGA)
sensor.set_pixformat(sensor.GRAYSCALE)

# Skip a few frames to allow the sensor settle down
# Note: This takes more time when exec from the IDE.
#for i in range(0, 10):
#    img = sensor.snapshot()
#    img.draw_string(0, 0, "Please wait...")

# Load Haar Cascade
# By default this will use all stages; fewer stages is faster but less accurate.
face_cascade = image.HaarCascade("frontalface", stages=25)
print(face_cascade)

# First set of keypoints
hzy_kpts = None
hjf_kpts = None
hmy_kpts = None
sf_kpts = None

# load faces!
img_hjf = image.Image("/hjfface2-hqvga.bmp", copy_to_fb=True)
#img = img2
#img.draw_string(0, 0, "Looking for a face...")
# Find faces
hjf_objects = img_hjf.find_features(face_cascade, threshold=0.7, scale_factor=1.5)
if hjf_objects:
Example #26
        self.isCounted = isCounted
        self.direction = direction
        self.prev_dir = prev_dir


sensor.reset()  # Reset and initialize the sensor.
sensor.set_pixformat(sensor.GRAYSCALE)  # Set pixel format to GRAYSCALE
sensor.set_framesize(sensor.QVGA)  # Set frame size to QVGA (320x240)
sensor.set_windowing((240, 240))  # Set 240x240 window.
sensor.skip_frames(time=2000)  # Let the camera adjust.

clock = time.clock()

print("Loading model")
person_cascade = image.HaarCascade(
    "/PeopleCounting/people_counting_cascade.cascade", stages=25)

height = None
width = None
count = 0

direction = 0
isCounted = False
i = 1
j = 1

b_boxes = []

vid = image.ImageReader("/PeopleCounting/walking.bin")

while (True):