Example #1
def facetrack_run():
    global kpts1
    global img
    global x
    global y
    global x_pos
    global y_pos

    clock = time.clock()

    clock.tick()
    img = sensor.snapshot()
    # Extract keypoints from the whole frame
    kpts2 = img.find_keypoints(max_keypoints=150, threshold=10, normalized=True)

    if (kpts2):
        # Match the first set of keypoints with the second one
        c = image.match_descriptor(kpts1, kpts2, threshold=85)
        # If more than 25% of the keypoints match, draw the matching set
        if (c[2] > 25):
            img.draw_cross(c[0], c[1], size=5)
            img.draw_string(0, 10, "Match %d%%"%(c[2]))

            x = c[0]
            y = c[1]
    utime.sleep_ms(300)
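facetrack_run() assumes a reference descriptor kpts1 captured earlier and a configured sensor. A minimal one-time setup sketch; the sensor settings and keypoint parameters here are assumptions, not part of the original example:

# Hypothetical setup for facetrack_run(): capture a reference frame and
# extract the descriptor that later frames are matched against.
import sensor, time, image, utime

sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.QVGA)
sensor.skip_frames(time=2000)

x = y = x_pos = y_pos = 0  # globals updated by facetrack_run()
img = sensor.snapshot()
kpts1 = img.find_keypoints(max_keypoints=150, threshold=10, scale_factor=1.2)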
Example #3
def unittest(data_path, temp_path):
    import image
    # Load image and find keypoints
    img = image.Image(data_path + "/graffiti.pgm", copy_to_fb=True)
    kpts1 = img.find_keypoints(max_keypoints=150,
                               threshold=20,
                               normalized=False)

    # Load descriptor
    kpts2 = image.load_descriptor(data_path + "/graffiti.orb")

    # Match keypoints
    match = image.match_descriptor(kpts1, kpts2, threshold=85)
    return  (match.cx()     == 138 and match.cy()     == 117 and \
             match.x()      == 36  and match.y()      == 34  and \
             match.w()      == 251 and match.h()      == 167 and \
             match.count()  == 150 and match.theta()  == 0)
def find_person(img):
    d0 = img.find_lbp((0, 0, img.width(), img.height()))
    identical_pic = 0  # the subject closest to the person in the dataset
    end_range = 100000
    for s in range(1, NUM_SUBJECTS+1):
        dist = 0
        for i in range(1, NUM_SUBJECTS_IMGS+1):
            img = image.Image("persons/s%d/%d.pgm" % (s, i)).mask_ellipse()
            d1 = img.find_lbp((0, 0, img.width(), img.height()))
            dist += image.match_descriptor(d0, d1)
        distOfAll = dist/NUM_SUBJECTS_IMGS  # average distance over this subject's images
        print("Average dist for subject %d: %d" % (s, distOfAll))
        if (distOfAll < end_range):
            identical_pic = s
            end_range = distOfAll
    if (end_range > 12000):
        identical_pic = -1  # -1 means unknown person
    return identical_pic
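A minimal usage sketch for find_person(). The constants, sensor settings, and the persons/ directory layout are assumptions modeled on the AT&T-database examples further down this page:

# Hypothetical setup -- adjust to the dataset actually on the SD card.
import sensor, image

NUM_SUBJECTS = 5        # subdirectories persons/s1 .. persons/s5
NUM_SUBJECTS_IMGS = 10  # images 1.pgm .. 10.pgm per subject

sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.HQVGA)
sensor.skip_frames(time=2000)

print(find_person(sensor.snapshot().mask_ellipse()))  # prints -1 for an unknown person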
Example #5
face_cascade = image.HaarCascade("frontalface", stages=25)
print(face_cascade)

# Skip a few frames to allow the sensor to settle down.
# Note: This takes more time when executed from the IDE.
for i in range(0, 30):
    img = sensor.snapshot()
    img.draw_string(0, 0, "Please wait...")

d0 = None
#d0 = image.load_descriptor("/desc.lbp")
clock = time.clock()

while (True):
    clock.tick()
    img = sensor.snapshot()

    objects = img.find_features(face_cascade, threshold=0.5, scale_factor=1.25)
    if objects:
        face = objects[0]
        d1 = img.find_lbp(face)
        if (d0 is None):
            d0 = d1
        else:
            dist = image.match_descriptor(d0, d1)
            img.draw_string(0, 10, "Match %d%%"%(dist))

        img.draw_rectangle(face)
    # Draw FPS
    img.draw_string(0, 0, "FPS:%.2f"%(clock.fps()))
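To persist the reference descriptor across runs (the counterpart of the commented-out image.load_descriptor("/desc.lbp") line above), a sketch assuming the path-only descriptor API used in this example (Example #9 below shows the older image.LBP-tagged variant):

# Save the first captured LBP descriptor so a later run can reload it.
if d0 is not None:
    image.save_descriptor("/desc.lbp", d0)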
Example #6
def face_recog(calc_time):
    pin = pyb.millis()
    print(pin)
    cc = 0
    #pyb.elapsed_millis(start)
    while pyb.elapsed_millis(pin) < calc_time:
        print("top of face recog function")
        #snapshot on face detection
        RED_LED_PIN = 1
        BLUE_LED_PIN = 3
        sensor.reset()  # Initialize the camera sensor.
        sensor.set_pixformat(sensor.GRAYSCALE)
        sensor.set_framesize(sensor.HQVGA)  # or sensor.QQVGA (or others)
        sensor.skip_frames(time=2000)  # Let new settings take effect.
        face_cascade = image.HaarCascade("frontalface", stages=25)
        uos.chdir("/")
        pyb.LED(RED_LED_PIN).on()
        print("About to start detecting faces...")
        sensor.skip_frames(time=2000)  # Give the user time to get ready.
        pyb.LED(RED_LED_PIN).off()
        print("Now detecting faces!")
        pyb.LED(BLUE_LED_PIN).on()
        diff = 10  # We'll say we detected a face after 10 frames.
        while (diff):
            img = sensor.snapshot()
            faces = img.find_features(face_cascade,
                                      threshold=0.5,
                                      scale_factor=1.5)
            if faces:
                diff -= 1
                for r in faces:
                    img.draw_rectangle(r)
        pyb.LED(BLUE_LED_PIN).off()
        print("Face detected! Saving image...")
        pic_name = "snapshot-person.pgm"
        sensor.snapshot().save(pic_name)  # save pic to the root of the SD card -- uos.chdir("/")
        pyb.delay(100)
        snap_img = image.Image(pic_name).mask_ellipse()
        d0 = snap_img.find_lbp((0, 0, snap_img.width(), snap_img.height()))
        # face recognition
        pyb.LED(2).on()
        name_lbp_list = []
        uos.chdir("/Faces")  # change directory to where all the webex photos from tcp are stored
        for filename in uos.listdir("/Faces"):
            if filename.endswith(".pgm"):
                try:
                    img = None  # free the previous image buffer before loading the next
                    img = image.Image(filename).mask_ellipse()
                    d1 = img.find_lbp((0, 0, img.width(), img.height()))
                    dist = image.match_descriptor(d0, d1, 50)
                    word = filename
                    #print(filename)
                    und_loc = word.index('_')
                    word = word[0:(und_loc)]
                    name_lbp_list.append(word)
                    name_lbp_list.append(dist)
                    continue
                except Exception as e:
                    print(e)
                    print("error reading file")
            else:
                print("file found that is not of type pgm")
        print(name_lbp_list)
        #print(len(name_lbp_list))
        end = 0
        name_avg = []
        i = 0
        start = 0
        while i < len(name_lbp_list):
            if ((i + 2) < len(name_lbp_list)) and (name_lbp_list[i] !=
                                                   name_lbp_list[i + 2]):
                end = i + 2
                #print(start)
                #print(end)
                face = []
                face = name_lbp_list[start:end]
                print(face)
                j = 1
                sum_lbp = 0
                while j < len(face):
                    sum_lbp += face[j]
                    j += 2
                name_avg.append(face[0])
                name_avg.append(sum_lbp / (len(face) / 2))
                start = i + 2
            i += 2
        face = []
        face = name_lbp_list[(end):(len(name_lbp_list))]
        print(face)
        j = 1
        sum_lbp = 0
        while j < len(face):
            sum_lbp += face[j]
            j += 2
        name_avg.append(face[0])
        name_avg.append(sum_lbp / (len(face) / 2))
        print(name_avg)
        lbps = []
        k = 1
        while k < len(name_avg):
            lbps.append(name_avg[k])
            k += 2
        print(lbps)
        #print(len(lbps))
        min_lbp = min(lbps)
        print(min_lbp)
        ind = lbps.index(min(lbps))
        #print(ind)
        ind += 1
        found_person = name_avg[2 * ind - 2]
        id_name = "The person you are looking at is: " + found_person
        print(id_name)
        #delete snapshot of person
        uos.remove("/snapshot-person.pgm")
        pyb.LED(2).off()
        cc += 1
        print(cc)
        # Expand the ROI by 31 pixels in each direction (half the pattern scale)
        img_face = (img_objects[0][0] - 31, img_objects[0][1] - 31,
                    img_objects[0][2] + 31 * 2, img_objects[0][3] + 31 * 2)
        # Extract keypoints using the detected face size as the ROI
        kpts = img.find_keypoints(threshold=KEYPOINTS_THRESH,
                                  scale_factor=1.0,
                                  max_keypoints=KEYPOINTS_MAX,
                                  normalized=NORMALIZED,
                                  roi=img_face)

        if (kpts):
            #img.draw_keypoints(kpts,size=2,color=0)
            #print("keypoints found!")
            # Match the first set of keypoints with the second one
            c1 = image.match_descriptor(andy_kpts,
                                        kpts,
                                        threshold=MATCHING_THRESH)
            c2 = image.match_descriptor(aya_kpts,
                                        kpts,
                                        threshold=MATCHING_THRESH)
            c3 = image.match_descriptor(leo_kpts,
                                        kpts,
                                        threshold=MATCHING_THRESH)
            c4 = image.match_descriptor(taiga_kpts,
                                        kpts,
                                        threshold=MATCHING_THRESH)
            # Find the best-matching reference: index 6 of each match result
            # holds the number of matched keypoints.
            c = [0, c1[6], c2[6], c3[6], c4[6]]
    clock.tick()
    img = sensor.snapshot()
    if (kpts1 is None):
        # NOTE: By default find_keypoints returns multi-scale keypoints extracted from an image pyramid.
        kpts1 = img.find_keypoints(max_keypoints=150,
                                   threshold=20,
                                   scale_factor=1.35)
        draw_keypoints(img, kpts1)
    else:
        # NOTE: When extracting keypoints to match the first descriptor, we use normalized=True to extract
        # keypoints from the first scale only, which will match one of the scales in the first descriptor.
        kpts2 = img.find_keypoints(max_keypoints=150,
                                   threshold=10,
                                   normalized=True)
        if (kpts2):
            match = image.match_descriptor(kpts1, kpts2, threshold=85)
            if (match.count() > 10):
                # If we have at least n "good matches"
                # Draw bounding rectangle and cross.
                img.draw_rectangle(match.rect())
                img.draw_cross(match.cx(), match.cy(), size=10)

            print(kpts2, "matched:%d dt:%d" % (match.count(), match.theta()))

            coords = list(match.rect())
            #print(coords)

            # Convert the match-rect centre from pixel coordinates to uArm
            # deltas: offset from the QVGA image centre (160, 120), ~20 px per unit.
            delta_y = (coords[2] / 2 + coords[0] - 160) / 20
            delta_x = (coords[3] / 2 + coords[1] - 120) / 20
Example #9
face_cascade = image.HaarCascade("frontalface", stages=25)
print(face_cascade)

# Skip a few frames to allow the sensor to settle down.
# Note: This takes more time when executed from the IDE.
for i in range(0, 30):
    img = sensor.snapshot()
    img.draw_string(0, 0, "Please wait...")

d0 = None
#d0 = image.load_descriptor(image.LBP, "/desc.lbp")
clock = time.clock()

while (True):
    clock.tick()
    img = sensor.snapshot()
    
    objects = img.find_features(face_cascade, threshold=0.5, scale=1.25)
    if objects:
        face = objects[0]
        d1 = img.find_lbp(face)
        if (d0 is None):
            d0 = d1
        else:
            dist = image.match_descriptor(image.LBP, d0, d1)
            img.draw_string(0, 10, "Match %d%%"%(dist))

        img.draw_rectangle(face)
    # Draw FPS
    img.draw_string(0, 0, "FPS:%.2f"%(clock.fps()))
Example #10
def face_recog(pic_name, vi_ip):
    print("~~~~~~~~~~~~~~~~FACE_RECOG~~~~~~~~~~~~~~~~~~~~~~")
    gc.collect()  #garbage collection
    #find LBP value for snapshot saved in face_detect
    snap_img = image.Image(pic_name, copy_to_fb=True).mask_ellipse()
    d0 = snap_img.find_lbp((0, 0, snap_img.width(), snap_img.height()))
    # turn on lights signaling facial recognition calculations starting
    pyb.LED(2).on()
    pyb.LED(3).on()
    #find LBP values for each image received in server_recv
    name_lbp_list = []
    uos.chdir("/CamFaces")  # change directory to where all the images from server_recv are stored
    for filename in uos.listdir("/CamFaces"):
        if filename.endswith(".pgm"):
            try:
                img = None  # free the previous image buffer before loading the next
                img = image.Image(filename, copy_to_fb=True).mask_ellipse()
                sensor.alloc_extra_fb(img.width(), img.height(),
                                      sensor.GRAYSCALE)  # allocate extra frame-buffer space
                d1 = img.find_lbp((0, 0, img.width(), img.height()))
                dist = image.match_descriptor(d0, d1, 50)  # threshold below 70 tightens the matching
                sensor.dealloc_extra_fb()
                # extracting the person's name from the file name
                pname = filename
                und_loc = pname.index('_')
                pname = pname[0:(und_loc)]
                # add the person's name and LBP value for the image to the list
                name_lbp_list.append(pname)
                name_lbp_list.append(dist)
                continue
            except Exception as e:
                print(e)
                print("error producing LBP value")
        else:
            print("file found that is not of type pgm")
    print(name_lbp_list)
    gc.collect()  #garbage collection
    # finding average LBP values for each name
    end = 0
    name_avg = []
    i = 0
    start = 0
    while i < len(name_lbp_list):  # for names 1 thru n-1
        if ((i + 2) < len(name_lbp_list)) and (name_lbp_list[i] !=
                                               name_lbp_list[i + 2]):
            end = i + 2
            face = []
            face = name_lbp_list[start:end]
            print(face)
            j = 1
            sum_lbp = 0
            while j < len(face):
                sum_lbp += face[j]
                j += 2
            name_avg.append(face[0])
            name_avg.append(sum_lbp / (len(face) / 2))
            start = i + 2
        i += 2
    face = []
    face = name_lbp_list[(end):(len(name_lbp_list))]
    print(face)
    gc.collect()  #garbage collection
    # special case: find average LBP value for last name in list (name n)
    j = 1
    sum_lbp = 0
    while j < len(face):
        sum_lbp += face[j]
        j += 2
    name_avg.append(face[0])
    name_avg.append(sum_lbp / (len(face) / 2))
    print(name_avg)
    lbps = []
    k = 1
    while k < len(name_avg):
        lbps.append(name_avg[k])
        k += 2
    print(lbps)
    gc.collect()  #garbage collection
    # find minimum average LBP and associated person name
    min_lbp = min(lbps)
    print(min_lbp)
    ind = lbps.index(min(lbps))
    ind += 1
    found_person = name_avg[2 * ind - 2]
    id_name = "The person you are looking at is: " + found_person
    print(id_name)
    #delete snapshot of person
    uos.remove("/snapshot-person.pgm")
    # turn off lights signaling facial recognition calculations done
    pyb.LED(2).off()
    pyb.LED(3).off()
    #TCP client socket to send name of the person recognized to the visually impaired user's smartphone
    chost = vi_ip
    cport = 8080
    client = usocket.socket(usocket.AF_INET, usocket.SOCK_STREAM)  # TCP client socket with IPv4 addressing
    client.connect((chost, cport))
    print("connected to visually impaired user's smartphone")
    to_send = id_name + "\n"
    client.send(to_send.encode())
    print("sent name to phone")
    client.close()  #client closed
    gc.collect()  #garbage collection
    return
Example #11
# Face recognition with LBP descriptors.
# See Timo Ahonen's "Face Recognition with Local Binary Patterns".
#
# Before running the example:
# 1) Download the AT&T faces database http://www.cl.cam.ac.uk/Research/DTG/attarchive/pub/data/att_faces.zip
# 2) Extract and copy the orl_faces directory to the SD card root.

import sensor, time, image

SUB = "s2"
NUM_SUBJECTS = 5
NUM_SUBJECTS_IMGS = 10

img = image.Image("orl_faces/%s/1.pgm" % (SUB)).mask_ellipse()
d0 = img.find_lbp((0, 0, img.width(), img.height()))
img = None

print("")
for s in range(1, NUM_SUBJECTS + 1):
    dist = 0
    for i in range(2, NUM_SUBJECTS_IMGS + 1):
        img = image.Image("orl_faces/s%d/%d.pgm" % (s, i)).mask_ellipse()
        d1 = img.find_lbp((0, 0, img.width(), img.height()))
        dist += image.match_descriptor(image.LBP, d0, d1)
    print("Average dist for subject %d: %d" % (s, dist / NUM_SUBJECTS_IMGS))
Example #12
                                   max_keypoints=100,
                                   roi=face)
        # Draw a rectangle around the first face
        img.draw_rectangle(objects[0])

# Draw keypoints
print(kpts1)
img.draw_keypoints(kpts1, size=12)
time.sleep(1000)

# FPS clock
clock = time.clock()

while (True):
    clock.tick()
    img = sensor.snapshot()
    # Extract keypoints from the whole frame
    kpts2 = img.find_keypoints(scale_factor=1.2, max_keypoints=100)

    if (kpts2):
        # Match the first set of keypoints with the second one
        c = image.match_descriptor(kpts1, kpts2)
        match = c[6]  # C[6] contains the number of matches.
        if (match > 5):
            img.draw_rectangle(c[2:6])
            img.draw_cross(c[0], c[1], size=10)
            print(kpts2, "matched:%d dt:%d" % (match, c[7]))

    # Draw FPS
    img.draw_string(0, 0, "FPS:%.2f" % (clock.fps()))
    def start_face_rendering(self):
        sensor.reset() # Initialize the camera sensor.
        sensor.set_pixformat(sensor.GRAYSCALE) # grayscale for LBP matching
        sensor.set_framesize(sensor.B128X128) # or sensor.QQVGA (or others)
        sensor.set_windowing((92,112))
        sensor.skip_frames(10) # Let new settings take effect.
        sensor.skip_frames(time = 5000) # wait 5 s
        s3 = Servo(3) # continuous-rotation servo on channel 3
        # Assign the blue LED to the variable led.
        led = pyb.LED(3) # Red LED = 1, Green LED = 2, Blue LED = 3, IR LEDs = 4.
        #SUB = "s1"
        NUM_SUBJECTS = 4 # number of distinct subjects in the image library
        NUM_SUBJECTS_IMGS = 17 # number of sample images per subject
        # Capture the current face.
        img = sensor.snapshot()
        #img = image.Image("singtown/%s/1.pgm"%(SUB))
        d0 = img.find_lbp((0, 0, img.width(), img.height()))
        # d0 is the LBP descriptor of the current face.
        img = None
        pmin = 999999
        self.num=0

        for s in range(1, NUM_SUBJECTS+1):
            dist = 0
            for i in range(2, NUM_SUBJECTS_IMGS+1):
                img = image.Image("singtown/s%d/%d.pgm"%(s, i))
                d1 = img.find_lbp((0, 0, img.width(), img.height()))
                # d1 is the LBP descriptor of image i in folder s.
                dist += image.match_descriptor(d0, d1) # distance between the sample image and the detected face
            print("Average dist for subject %d: %d"%(s, dist/NUM_SUBJECTS_IMGS))
            pmin = self.min(pmin, dist/NUM_SUBJECTS_IMGS, s) # a smaller distance means a closer match to this subject
            print(pmin)

        print(self.num) # num is the index of the best-matching subject.
        # TS == 3 means no mask is worn.
        if (pmin > 5000) and (TS == 3):
            uart.write("-- NO People! --")
            led.off()
        if (pmin > 5000) and (TS == 1):
            uart.write("-- NO People! --")
            led.off()
        if pmin <= 5000:
            if self.num == 1:     # matched people_One
                uart.write("People One      ")
            if self.num == 2:
                uart.write("People Two      ")
            if self.num == 3:
                uart.write("People Three    ")
            if self.num == 4:
                uart.write("People New      ")
            led.on()              # LED on
            led1.off()
            time.sleep(3500)      # delay 3500 ms
            led.off()
            led.off()
            for i in range(1,460):
                s3.speed(50) # for continuous rotation servos
                time.sleep(15)
            s3.speed(0)
            time.sleep(1500)
            for i in range(1,230):
                s3.speed(-50)
                time.sleep(15)
            s3.speed(0)
 img = sensor.snapshot()
 if (kpts1 is None):
     # NOTE: By default find_keypoints returns multi-scale keypoints extracted from an image pyramid.
     kpts1 = img.find_keypoints(max_keypoints=150,
                                threshold=10,
                                scale_factor=1.2)
     draw_keypoints(img, kpts1)
 else:
     # NOTE: When extracting keypoints to match the first descriptor, we use normalized=True to extract
     # keypoints from the first scale only, which will match one of the scales in the first descriptor.
     kpts100 = img.find_keypoints(max_keypoints=150,
                                  threshold=10,
                                  normalized=True)
     if (kpts100):
         if (imgtarget == '1'):
             match = image.match_descriptor(kpts1, kpts100, threshold=85)
         elif (imgtarget == '2'):
             match = image.match_descriptor(kpts2, kpts100, threshold=85)
         elif (imgtarget == '3'):
             match = image.match_descriptor(kpts3, kpts100, threshold=85)
         elif (imgtarget == '4'):
             match = image.match_descriptor(kpts4, kpts100, threshold=85)
         if (match.count() > 10):
             # If we have at least n "good matches"
             # Draw bounding rectangle and cross.
             #img.draw_rectangle(match.rect())
             img.draw_cross(match.cx(), match.cy(), size=10)
             output_str = "[%d,%d]" % (match.cx(), match.cy())
             uart.write(output_str + '\r\n')
             print(kpts100,
                   "matched:%d dt:%d" % (match.count(), match.theta()))
Example #15
    img = sensor.snapshot()
    img.draw_string(0, 0, "Please wait...")

kpts1 = None
# Uncomment to load keypoints from file
#kpts1 = image.load_descriptor(image.FREAK, "/desc.freak")
clock = time.clock()

while (True):
    clock.tick()
    img = sensor.snapshot()
    kpts2 = img.find_keypoints(threshold=KEYPOINTS_THRESH,
                               normalized=NORMALIZED)

    if (kpts1 is None):
        kpts1 = kpts2
        print(kpts1)
    elif kpts2:
        c = image.match_descriptor(image.FREAK,
                                   kpts1,
                                   kpts2,
                                   threshold=MATCHING_THRESH)
        # C[2] contains the percentage of matching keypoints.
        # If more than 25% of the keypoints match, draw stuff.
        if (c[2] > 25):
            img.draw_cross(c[0], c[1], size=15)
            img.draw_string(0, 10, "Match %d%%" % (c[2]))

    # Draw FPS
    img.draw_string(0, 0, "FPS:%.2f" % (clock.fps()))
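This example relies on constants defined in its elided header. Plausible values, given here purely as assumptions:

# Assumed constants for the FREAK example above -- values are illustrative.
KEYPOINTS_THRESH = 32   # keypoint detector threshold
NORMALIZED = False      # multi-scale keypoint extraction
MATCHING_THRESH = 80    # descriptor matching threshold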
Example #16
def face_recog(pic_name, vi_ip):
    print("~~~~~~~~~~~~~~~~FACE_RECOG~~~~~~~~~~~~~~~~~~~~~~")
    gc.collect()
    snap_img = image.Image(pic_name, copy_to_fb=True).mask_ellipse()
    d0 = snap_img.find_lbp((0, 0, snap_img.width(), snap_img.height()))
    pyb.LED(2).on()
    pyb.LED(3).on()
    name_lbp_list = []
    uos.chdir("/CamFaces")
    for filename in uos.listdir("/CamFaces"):
        if filename.endswith(".pgm"):
            try:
                img = None
                img = image.Image(filename, copy_to_fb=True).mask_ellipse()
                sensor.alloc_extra_fb(img.width(), img.height(),
                                      sensor.GRAYSCALE)
                d1 = img.find_lbp((0, 0, img.width(), img.height()))
                dist = image.match_descriptor(d0, d1, 50)
                sensor.dealloc_extra_fb()
                pname = filename
                und_loc = pname.index('_')
                pname = pname[0:(und_loc)]
                name_lbp_list.append(pname)
                name_lbp_list.append(dist)
                continue
            except Exception as e:
                print(e)
                print("error producing LBP value")
        else:
            print("file found that is not of type pgm")
    print(name_lbp_list)
    gc.collect()
    end = 0
    name_avg = []
    i = 0
    start = 0
    while i < len(name_lbp_list):
        if ((i + 2) < len(name_lbp_list)) and (name_lbp_list[i] !=
                                               name_lbp_list[i + 2]):
            end = i + 2
            face = []
            face = name_lbp_list[start:end]
            print(face)
            j = 1
            sum_lbp = 0
            while j < len(face):
                sum_lbp += face[j]
                j += 2
            name_avg.append(face[0])
            name_avg.append(sum_lbp / (len(face) / 2))
            start = i + 2
        i += 2
    face = []
    face = name_lbp_list[(end):(len(name_lbp_list))]
    print(face)
    gc.collect()
    j = 1
    sum_lbp = 0
    while j < len(face):
        sum_lbp += face[j]
        j += 2
    name_avg.append(face[0])
    name_avg.append(sum_lbp / (len(face) / 2))
    print(name_avg)
    lbps = []
    k = 1
    while k < len(name_avg):
        lbps.append(name_avg[k])
        k += 2
    print(lbps)
    gc.collect()
    min_lbp = min(lbps)
    print(min_lbp)
    ind = lbps.index(min(lbps))
    ind += 1
    found_person = name_avg[2 * ind - 2]
    id_name = "The person you are looking at is: " + found_person
    print(id_name)
    uos.remove("/snapshot-person.pgm")
    pyb.LED(2).off()
    pyb.LED(3).off()
    chost = vi_ip
    cport = 8080
    client = usocket.socket(usocket.AF_INET, usocket.SOCK_STREAM)
    client.connect((chost, cport))
    print("connected to visually impaired user's smartphone")
    to_send = id_name + "\n"
    client.send(to_send.encode())
    print("sent name to phone")
    client.close()
    gc.collect()
    return
        kpts1 = img.find_keypoints(threshold=10, scale_factor=1.1, max_keypoints=100, roi=face)
        # Draw a rectangle around the first face
        img.draw_rectangle(objects[0])

# Draw keypoints
print(kpts1)
img.draw_keypoints(kpts1, size=24)
img = sensor.snapshot()
time.sleep(2000)

# FPS clock
clock = time.clock()

while (True):
    clock.tick()
    img = sensor.snapshot()
    # Extract keypoints from the whole frame
    kpts2 = img.find_keypoints(threshold=10, scale_factor=1.1, max_keypoints=100, normalized=True)

    if (kpts2):
        # Match the first set of keypoints with the second one
        c = image.match_descriptor(kpts1, kpts2, threshold=85)
        match = c[6]  # C[6] contains the number of matches.
        if (match > 5):
            img.draw_rectangle(c[2:6])
            img.draw_cross(c[0], c[1], size=10)
            print(kpts2, "matched:%d dt:%d" % (match, c[7]))

    # Draw FPS
    img.draw_string(0, 0, "FPS:%.2f"%(clock.fps()))
Example #18
            pmin = 999999  # pmin is the smallest descriptor distance seen so far
            iden_out_num = 0

            def min(pmin, a, s):  # running minimum (shadows the built-in min); a is dist/NUM_SUBJECTS_IMGS
                global iden_out_num
                if a < pmin:
                    pmin = a
                    iden_out_num = s
                return pmin

            for s in range(1, NUM_SUBJECTS+1):
                dist = 0
                for i in range(2, NUM_SUBJECTS_IMGS+1):
                    img = image.Image("identify/i%s/%s.pgm"%(s, i))
                    d1 = img.find_lbp((11,14,70,84)) # LBP of image i in folder s; the ROI is shrunk inward by 1/8
                    dist += image.match_descriptor(d0, d1) # distance between the sample image and the detected face
                print("Distance for subject %d: %d"%(s, dist/NUM_SUBJECTS_IMGS)) # show the distance on the serial terminal
                pmin = min(pmin, dist/NUM_SUBJECTS_IMGS, s) # a smaller distance means a closer match to this subject
                print(pmin)

            # Store the result of each of the 3 recognition passes.
            if iden == 0:
                iden_out_num0 = iden_out_num
                print("Pass %d: %d"%(iden+1, iden_out_num0))
            if iden == 1:
                iden_out_num1 = iden_out_num
                print("Pass %d: %d"%(iden+1, iden_out_num1))
            if iden == 2:
                iden_out_num2 = iden_out_num
                print("Pass %d: %d"%(iden+1, iden_out_num2))
Example #19
def facial_recog(pic_name, vi_ip):
    cc = 0
    snap_img = image.Image(pic_name, copy_to_fb=True).mask_ellipse()
    d0 = snap_img.find_lbp((0, 0, snap_img.width(), snap_img.height()))
    # face recognition
    pyb.LED(2).on()
    name_lbp_list = []
    uos.chdir("/Faces")  # change directory to where all the webex photos from tcp are stored
    for filename in uos.listdir("/Faces"):
        if filename.endswith(".pgm"):
            try:
                img = None
                img = image.Image(filename, copy_to_fb=True).mask_ellipse()
                sensor.alloc_extra_fb(img.width(), img.height(),
                                      sensor.GRAYSCALE)
                d1 = img.find_lbp((0, 0, img.width(), img.height()))
                dist = image.match_descriptor(d0, d1, 50)
                sensor.dealloc_extra_fb()
                word = filename
                #print(filename)
                und_loc = word.index('_')
                word = word[0:(und_loc)]
                name_lbp_list.append(word)
                name_lbp_list.append(dist)
                continue
            except Exception as e:
                print(e)
                print("error reading file")
        else:
            print("file found that is not of type pgm")
    print(name_lbp_list)
    #print(len(name_lbp_list))
    end = 0
    name_avg = []
    i = 0
    start = 0
    while i < len(name_lbp_list):
        if ((i + 2) < len(name_lbp_list)) and (name_lbp_list[i] !=
                                               name_lbp_list[i + 2]):
            end = i + 2
            #print(start)
            #print(end)
            face = []
            face = name_lbp_list[start:end]
            print(face)
            j = 1
            sum_lbp = 0
            while j < len(face):
                sum_lbp += face[j]
                j += 2
            name_avg.append(face[0])
            name_avg.append(sum_lbp / (len(face) / 2))
            start = i + 2
        i += 2
    face = []
    face = name_lbp_list[(end):(len(name_lbp_list))]
    print(face)
    j = 1
    sum_lbp = 0
    while j < len(face):
        sum_lbp += face[j]
        j += 2
    name_avg.append(face[0])
    name_avg.append(sum_lbp / (len(face) / 2))
    print(name_avg)
    lbps = []
    k = 1
    while k < len(name_avg):
        lbps.append(name_avg[k])
        k += 2
    print(lbps)
    #print(len(lbps))
    min_lbp = min(lbps)
    print(min_lbp)
    ind = lbps.index(min(lbps))
    #print(ind)
    ind += 1
    found_person = name_avg[2 * ind - 2]
    id_name = "The person you are looking at is: " + found_person
    print(id_name)
    #delete snapshot of person
    uos.remove("/snapshot-person.pgm")
    pyb.LED(2).off()
    cc += 1
    print(cc)
    #client socket
    chost = vi_ip
    print(chost)
    #chost = "10.132.30.198"
    cport = 8080
    client = usocket.socket(usocket.AF_INET, usocket.SOCK_STREAM)
    client.connect((chost, cport))
    print("connected to android")
    to_send = id_name + "\n"
    # Send the recognized name to the phone.
    client.send(to_send.encode())
    # Close the socket.
    client.close()
    gc.collect()
Example #20
# Face recognition with LBP descriptors.
# See Timo Ahonen's "Face Recognition with Local Binary Patterns".
#
# Before running the example:
# 1) Download the AT&T faces database http://www.cl.cam.ac.uk/Research/DTG/attarchive/pub/data/att_faces.zip
# 2) Extract and copy the orl_faces directory to the SD card root.
#
# NOTE: This is just a PoC implementation of the paper mentioned above; it does not work well in real-life conditions.

import sensor, time, image

SUB = "s2"
NUM_SUBJECTS = 5
NUM_SUBJECTS_IMGS = 10

img = image.Image("orl_faces/%s/1.pgm" % (SUB)).mask_ellipse()
d0 = img.find_lbp((0, 0, img.width(), img.height()))
img = None

print("")
for s in range(1, NUM_SUBJECTS + 1):
    dist = 0
    for i in range(2, NUM_SUBJECTS_IMGS + 1):
        img = image.Image("orl_faces/s%d/%d.pgm" % (s, i)).mask_ellipse()
        d1 = img.find_lbp((0, 0, img.width(), img.height()))
        dist += image.match_descriptor(d0, d1)
    print("Average dist for subject %d: %d" % (s, dist / NUM_SUBJECTS_IMGS))
        face = (objects[0][0] - 22, objects[0][1] - 22, objects[0][2] + 22*2, objects[0][3] + 22*2)
        # Extract keypoints using the detected face size as the ROI
        kpts1 = img.find_keypoints(threshold=KEYPOINTS_THRESH, normalized=NORMALIZED, roi=face)
        # Draw a rectangle around the first face
        img.draw_rectangle(objects[0])

# Draw keypoints
print(kpts1)
img.draw_keypoints(kpts1, size=12)
time.sleep(1000)

# FPS clock
clock = time.clock()

while (True):
    clock.tick()
    img = sensor.snapshot()
    # Extract keypoints from the whole frame
    kpts2 = img.find_keypoints(threshold=KEYPOINTS_THRESH, normalized=NORMALIZED)

    if (kpts2):
        # Match the first set of keypoints with the second one
        c = image.match_descriptor(image.FREAK, kpts1, kpts2, threshold=MATCHING_THRESH)
        # If more than 25% of the keypoints match, draw the matching set
        if (c[2] > 25):
            img.draw_cross(c[0], c[1], size=5)
            img.draw_string(0, 10, "Match %d%%"%(c[2]))

    # Draw FPS
    img.draw_string(0, 0, "FPS:%.2f"%(clock.fps()))
Example #22
    if img_objects:
        print("a new face detected!")
        # Draw a rectangle around the first face
        #img.draw_rectangle(img_objects[0])
        # Expand the ROI by 31 pixels in each direction (half the pattern scale)
        img_face = (img_objects[0][0]-31, img_objects[0][1]-31, img_objects[0][2]+31*2, img_objects[0][3]+31*2)
        # Extract keypoints using the detected face size as the ROI
        kpts2 = img.find_keypoints(threshold=10, scale_factor=1.3, max_keypoints=100, normalized=True, roi=img_face)
        #kpts2 = img.find_keypoints(threshold=KEYPOINTS_THRESH, normalized=NORMALIZED)

        if (kpts2):
            img.draw_rectangle(img_objects[0])
            print("keypoints found!")
            # Match the detected keypoints against each reference set
            c1 = image.match_descriptor(hjf_kpts, kpts2, threshold=60)
            c2 = image.match_descriptor(sf_kpts, kpts2, threshold=60)
            c3 = image.match_descriptor(hzy_kpts, kpts2, threshold=60)
            c4 = image.match_descriptor(hmy_kpts, kpts2, threshold=60)
            # Find the best-matching reference: index 6 of each result holds
            # the number of matched keypoints.
            c = [0, c1[6], c2[6], c3[6], c4[6]]
            print(c)
            c[0] = c[1]  # init c[0] with the first candidate
            m = 1
            for j in range(3):
                if (c[0] < c[j+2]):
                    c[0] = c[j+2]
                    m = j + 2
Example #23
def recognition(timeout=500):
    face = None
    img = None

    matchMin = 999999
    matchUser = ''
    matchArr = []

    basePath = "photo"
    #basePath = "desc"
    users = os.listdir(basePath)

    time_start = pyb.millis()
    while not face:
        if pyb.elapsed_millis(time_start) > timeout:
            break
        img = sensor.snapshot()
        face = facsTest(img)
        checkDisplay(img)
    if not face:
        return matchUser, face, img

    if not len(users):
        # pyb.delay(timeout)
        return matchUser, face, img

    nowDesc = img.find_lbp(face)

    photoFpath = ""
    try:
        for user in users:
            userDescArr = []
            baseDpath = "%s/%s" % (basePath, user)
            files = os.listdir(baseDpath)
            for file_ in files:
                # descFpath = baseDpath+"/"+file_
                # oldDesc = image.load_descriptor(descFpath)

                photoFpath = baseDpath + "/" + file_
                oldImg = image.Image(photoFpath)
                oldDesc = oldImg.find_lbp(
                    (0, 0, oldImg.width(), oldImg.height()))

                match = image.match_descriptor(nowDesc, oldDesc,
                                               DESC_THRESHOLD,
                                               DESC_FILTER_OUTLIERS)
                userDescArr.append(match)
            userDescArr.sort()
            sliceCnt = CHECK_MIN_CNT or len(userDescArr)
            matchResult = sum(userDescArr[:sliceCnt]) / sliceCnt
            matchArr.append(matchResult)
            # print("sliceCnt,userDescArr: ",sliceCnt,userDescArr)
            if matchResult < matchMin:
                matchMin = matchResult
                if matchResult < MATCH_THRESHOLD:
                    matchUser = user
    except Exception as err:
        print("recognition error:", photoFpath, err)

    print(matchMin, matchUser, matchArr,
          len(matchArr) >= 2 and (matchArr[0] - matchArr[1]))
    return matchUser, face, img
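A minimal caller sketch for recognition(). The constants (DESC_THRESHOLD, DESC_FILTER_OUTLIERS, CHECK_MIN_CNT, MATCH_THRESHOLD) and the facsTest()/checkDisplay() helpers are assumed to be defined in this example's elided header:

# Hypothetical usage of recognition() defined above.
user, face, img = recognition(timeout=1000)
if user:
    img.draw_rectangle(face)                   # highlight the matched face
    img.draw_string(0, 0, "Hello %s" % user)
else:
    print("no match")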