def face_recog(calc_time, vi_ip):
    pin = pyb.millis()
    print(pin)
    print(calc_time)
    while pyb.elapsed_millis(pin) < calc_time:
        print("top of face recog function")
        # Snapshot on face detection.
        RED_LED_PIN = 1
        BLUE_LED_PIN = 3
        sensor.reset()  # Initialize the camera sensor.
        sensor.set_contrast(3)
        sensor.set_gainceiling(16)
        sensor.set_pixformat(sensor.GRAYSCALE)
        sensor.set_framesize(sensor.HQVGA)  # or sensor.QQVGA (or others)
        sensor.skip_frames(time=2000)  # Let new settings take effect.
        face_cascade = image.HaarCascade("frontalface", stages=25)
        uos.chdir("/")
        pyb.LED(RED_LED_PIN).on()
        print("About to start detecting faces...")
        sensor.skip_frames(time=2000)  # Give the user time to get ready.
        pyb.LED(RED_LED_PIN).off()
        print("Now detecting faces!")
        pyb.LED(BLUE_LED_PIN).on()
        diff = 10  # We'll say we detected a face after 10 frames.
        try:
            while diff:
                img = sensor.snapshot()
                sensor.alloc_extra_fb(img.width(), img.height(), sensor.GRAYSCALE)
                faces = img.find_features(face_cascade, threshold=0.5, scale_factor=1.5)
                sensor.dealloc_extra_fb()
                if faces:
                    diff -= 1
                    for r in faces:
                        img.draw_rectangle(r)
                elif pyb.elapsed_millis(pin) > calc_time:
                    raise Exception
            pyb.LED(BLUE_LED_PIN).off()
            print("Face detected! Saving image...")
            pic_name = "snapshot-person.pgm"
            sensor.snapshot().save(pic_name)  # Save pic to root of SD card (uos.chdir("/")).
            pyb.delay(100)
            facial_recog(pic_name, vi_ip)
            gc.collect()
        except Exception:
            print("we are in exception")
            pyb.LED(BLUE_LED_PIN).off()
            gc.collect()
def face_detect(init_start, calc_time):
    print("~~~~~~~~~~~~~~~~FACE_DETECT~~~~~~~~~~~~~~~~~~~~~~")
    gc.collect()  # Garbage collection.
    while pyb.elapsed_millis(init_start) < calc_time:  # While time has not expired.
        # Snapshot on face detection.
        RED_LED_PIN = 1
        BLUE_LED_PIN = 3
        sensor.reset()  # Initialize the camera sensor.
        sensor.set_contrast(3)  # Highest contrast setting.
        sensor.set_gainceiling(16)
        sensor.set_pixformat(sensor.GRAYSCALE)  # Grayscale for facial recognition.
        sensor.set_framesize(sensor.HQVGA)
        sensor.skip_frames(time=2000)  # Let new settings take effect.
        # Frontal-face Haar cascade classifier.
        face_cascade = image.HaarCascade("frontalface", stages=25)
        uos.chdir("/")
        pyb.LED(RED_LED_PIN).on()
        print("About to start detecting faces...")
        sensor.skip_frames(time=2000)  # Give the user time to get ready.
        pyb.LED(RED_LED_PIN).off()
        print("Now detecting faces!")
        pyb.LED(BLUE_LED_PIN).on()
        diff = 10  # We'll say we detected a face after 10 frames.
        try:
            while diff:
                img = sensor.snapshot()
                # Allocate extra frame-buffer space for the image.
                sensor.alloc_extra_fb(img.width(), img.height(), sensor.GRAYSCALE)
                # Detect face features.
                faces = img.find_features(face_cascade, threshold=0.5, scale_factor=1.5)
                sensor.dealloc_extra_fb()
                if faces:
                    diff -= 1
                    for r in faces:
                        img.draw_rectangle(r)
                elif pyb.elapsed_millis(init_start) > calc_time:
                    # Time expired: leave the function.
                    raise Exception
            pyb.LED(BLUE_LED_PIN).off()
            print("Face detected! Saving image...")
            pic_name = "snapshot-person.pgm"
            sensor.snapshot().save(pic_name)  # Save pic to root of SD card.
            pyb.delay(100)
            gc.collect()  # Garbage collection.
            return pic_name
        except Exception:
            print("exception - time expired")
            pyb.LED(BLUE_LED_PIN).off()
            gc.collect()  # Garbage collection.
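# A minimal usage sketch (an assumption, not part of the original source) of
# how face_detect() and face_recog() could be chained from a main loop. The
# time budget and the phone IP below are hypothetical placeholders; the real
# caller may obtain both elsewhere (e.g. from the server handshake).
def run_pipeline_example():
    init_start = pyb.millis()   # start of the detection time budget
    calc_time = 30000           # hypothetical: 30 s budget, in milliseconds
    vi_ip = "192.168.1.50"      # hypothetical: the smartphone's IP address
    pic_name = face_detect(init_start, calc_time)
    if pic_name:                # face_detect returns None when time expires
        face_recog(pic_name, vi_ip)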
def face_recog(pic_name, vi_ip):
    print("~~~~~~~~~~~~~~~~FACE_RECOG~~~~~~~~~~~~~~~~~~~~~~")
    gc.collect()  # Garbage collection.
    # Find the LBP descriptor for the snapshot saved in face_detect.
    snap_img = image.Image(pic_name, copy_to_fb=True).mask_ellipse()
    d0 = snap_img.find_lbp((0, 0, snap_img.width(), snap_img.height()))
    # Turn on LEDs signaling that facial-recognition calculations are starting.
    pyb.LED(2).on()
    pyb.LED(3).on()
    # Find LBP descriptors for each image received in server_recv.
    name_lbp_list = []
    uos.chdir("/CamFaces")  # Directory where all the images from server_recv are stored.
    for filename in uos.listdir("/CamFaces"):
        if filename.endswith(".pgm"):
            try:
                img = image.Image(filename, copy_to_fb=True).mask_ellipse()
                # Allocate extra frame-buffer space for the image.
                sensor.alloc_extra_fb(img.width(), img.height(), sensor.GRAYSCALE)
                d1 = img.find_lbp((0, 0, img.width(), img.height()))
                # Threshold below 70 tightens the matching algorithm.
                dist = image.match_descriptor(d0, d1, 50)
                sensor.dealloc_extra_fb()
                # Extract the person's name from the file name.
                pname = filename
                und_loc = pname.index('_')
                pname = pname[0:und_loc]
                # Append the person's name and the LBP distance for this image.
                name_lbp_list.append(pname)
                name_lbp_list.append(dist)
            except Exception as e:
                print(e)
                print("error producing LBP value")
        else:
            print("file found that is not of type pgm")
    print(name_lbp_list)
    gc.collect()  # Garbage collection.
    # Find the average LBP distance for each name (names 1 through n-1).
    end = 0
    name_avg = []
    i = 0
    start = 0
    while i < len(name_lbp_list):
        if ((i + 2) < len(name_lbp_list)) and (name_lbp_list[i] != name_lbp_list[i + 2]):
            end = i + 2
            face = name_lbp_list[start:end]
            print(face)
            j = 1
            sum_lbp = 0
            while j < len(face):
                sum_lbp += face[j]
                j += 2
            name_avg.append(face[0])
            name_avg.append(sum_lbp / (len(face) / 2))
            start = i + 2
        i += 2
    face = name_lbp_list[end:len(name_lbp_list)]
    print(face)
    gc.collect()  # Garbage collection.
    # Special case: average LBP distance for the last name in the list (name n).
    j = 1
    sum_lbp = 0
    while j < len(face):
        sum_lbp += face[j]
        j += 2
    name_avg.append(face[0])
    name_avg.append(sum_lbp / (len(face) / 2))
    print(name_avg)
    lbps = []
    k = 1
    while k < len(name_avg):
        lbps.append(name_avg[k])
        k += 2
    print(lbps)
    gc.collect()  # Garbage collection.
    # Find the minimum average LBP distance and the associated person's name.
    min_lbp = min(lbps)
    print(min_lbp)
    ind = lbps.index(min(lbps))
    ind += 1
    found_person = name_avg[2 * ind - 2]
    id_name = "The person you are looking at is: " + found_person
    print(id_name)
    # Delete the snapshot of the person.
    uos.remove("/snapshot-person.pgm")
    # Turn off LEDs signaling that facial-recognition calculations are done.
    pyb.LED(2).off()
    pyb.LED(3).off()
    # TCP client socket (IPv4) to send the name of the recognized person
    # to the visually impaired user's smartphone.
    chost = vi_ip
    cport = 8080
    client = usocket.socket(usocket.AF_INET, usocket.SOCK_STREAM)
    client.connect((chost, cport))
    print("connected to visually impaired user's smartphone")
    to_send = id_name + "\n"
    client.send(to_send.encode())
    print("sent name to phone")
    client.close()  # Client closed.
    gc.collect()  # Garbage collection.
    return
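# An equivalent sketch (an assumption, not part of the original source) of the
# per-name averaging above, using a dictionary keyed by name instead of the
# flat [name, dist, name, dist, ...] list. For the grouped input face_recog
# builds it yields the same averages; it is shown only to clarify what the
# index arithmetic computes.
def average_lbp_by_name(name_lbp_list):
    sums = {}    # name -> running sum of LBP distances
    counts = {}  # name -> number of distances seen for that name
    for i in range(0, len(name_lbp_list), 2):
        name, dist = name_lbp_list[i], name_lbp_list[i + 1]
        sums[name] = sums.get(name, 0) + dist
        counts[name] = counts.get(name, 0) + 1
    return {name: sums[name] / counts[name] for name in sums}

# The best match is then the name with the smallest average distance:
#     avgs = average_lbp_by_name(name_lbp_list)
#     found_person = min(avgs, key=avgs.get)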
def facial_recog(pic_name, vi_ip):
    cc = 0
    snap_img = image.Image(pic_name, copy_to_fb=True).mask_ellipse()
    d0 = snap_img.find_lbp((0, 0, snap_img.width(), snap_img.height()))
    # Face recognition.
    pyb.LED(2).on()
    name_lbp_list = []
    uos.chdir("/Faces")  # Directory where all the Webex photos from TCP are stored.
    for filename in uos.listdir("/Faces"):
        if filename.endswith(".pgm"):
            try:
                img = image.Image(filename, copy_to_fb=True).mask_ellipse()
                sensor.alloc_extra_fb(img.width(), img.height(), sensor.GRAYSCALE)
                d1 = img.find_lbp((0, 0, img.width(), img.height()))
                dist = image.match_descriptor(d0, d1, 50)
                sensor.dealloc_extra_fb()
                # Extract the person's name from the file name.
                word = filename
                und_loc = word.index('_')
                word = word[0:und_loc]
                name_lbp_list.append(word)
                name_lbp_list.append(dist)
            except Exception as e:
                print(e)
                print("error reading file")
        else:
            print("file found that is not of type pgm")
    print(name_lbp_list)
    # Average the LBP distances for each name (names 1 through n-1).
    end = 0
    name_avg = []
    i = 0
    start = 0
    while i < len(name_lbp_list):
        if ((i + 2) < len(name_lbp_list)) and (name_lbp_list[i] != name_lbp_list[i + 2]):
            end = i + 2
            face = name_lbp_list[start:end]
            print(face)
            j = 1
            sum_lbp = 0
            while j < len(face):
                sum_lbp += face[j]
                j += 2
            name_avg.append(face[0])
            name_avg.append(sum_lbp / (len(face) / 2))
            start = i + 2
        i += 2
    # Special case: average LBP distance for the last name in the list.
    face = name_lbp_list[end:len(name_lbp_list)]
    print(face)
    j = 1
    sum_lbp = 0
    while j < len(face):
        sum_lbp += face[j]
        j += 2
    name_avg.append(face[0])
    name_avg.append(sum_lbp / (len(face) / 2))
    print(name_avg)
    lbps = []
    k = 1
    while k < len(name_avg):
        lbps.append(name_avg[k])
        k += 2
    print(lbps)
    # Find the minimum average LBP distance and the associated person's name.
    min_lbp = min(lbps)
    print(min_lbp)
    ind = lbps.index(min(lbps))
    ind += 1
    found_person = name_avg[2 * ind - 2]
    id_name = "The person you are looking at is: " + found_person
    print(id_name)
    # Delete the snapshot of the person.
    uos.remove("/snapshot-person.pgm")
    pyb.LED(2).off()
    cc += 1
    print(cc)
    # Client socket to the phone.
    chost = vi_ip
    print(chost)
    cport = 8080
    client = usocket.socket(usocket.AF_INET, usocket.SOCK_STREAM)
    client.connect((chost, cport))
    print("connected to android")
    to_send = id_name + "\n"
    # Send the name, then close the socket.
    client.send(to_send.encode())
    client.close()
    gc.collect()
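# A small desktop-side test receiver (an assumption, not part of the original
# source): it listens on port 8080 and prints the newline-terminated name that
# face_recog()/facial_recog() send, standing in for the smartphone app during
# testing. This uses the CPython socket module and runs on the machine whose
# IP is passed as vi_ip.
import socket

def test_name_receiver(port=8080):
    srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    srv.bind(("", port))
    srv.listen(1)
    conn, addr = srv.accept()        # wait for the camera to connect
    print("camera connected from", addr)
    data = conn.recv(1024).decode()  # read the "The person you are..." line
    print("received:", data.strip())
    conn.close()
    srv.close()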
        exit(1)
else:
    print("Could not configure the remote cam!")
    exit(1)

# Skip frames here, as this is exactly what the remote cam is doing on its side.
sensor.skip_frames(time=2000)

# Save the reference image used for the diff.
print("About to save background image...")
ref_img = sensor.alloc_extra_fb(img_width, img_height, sensor_format)
data_fb = sensor.alloc_extra_fb(img_width, img_height, sensor.RGB565)
ref_img.replace(sensor.snapshot().remap(data_fb, right=False, upside_down=True))
sensor.dealloc_extra_fb()
print("Saved background image - Now frame differencing!")

# Widen the window by an additional strip that will convey the mask info.
sensor.set_windowing((int((sensor.width() - img_width) / 2) - 2,
                      int((sensor.height() - img_height) / 2),
                      img_width, img_height + mask_height))

time.sleep(500)
clock = time.clock()
idx = 0
data_fb = sensor.alloc_extra_fb(img_width, img_height, sensor.RGB565)
right_image = image.Image(img_width, img_height + mask_height, sensor_format, copy_to_fb=False)
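# A minimal sketch (an assumption, not part of the original source) of the
# frame-differencing loop this setup typically leads into on OpenMV: each new
# frame is subtracted from the saved background with the standard difference()
# call, leaving only the pixels that changed. The remap() step used in the
# capture above is omitted here, since it appears to come from custom/stereo
# firmware rather than the stock OpenMV API.
def frame_diff_loop_example(ref_img, clock):
    while True:
        clock.tick()
        img = sensor.snapshot()
        img.difference(ref_img)  # absolute per-pixel difference vs. background
        print(clock.fps())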