/
facerec.py
69 lines (54 loc) · 1.84 KB
/
facerec.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
import cv2
import glob
import os
import sys
import select
import config
import face
import Database_1
def is_letter_input(letter):
    """Return True if *letter* is the next character waiting on stdin.

    Non-blocking: uses select() with a zero timeout, so this returns
    immediately when no input is available.  The comparison is
    case-insensitive.  Consumes one character from stdin when data is ready.
    """
    readable, _, _ = select.select([sys.stdin], [], [], 0.0)
    if not readable:
        # Nothing buffered on stdin right now.
        return False
    next_char = sys.stdin.read(1)
    return next_char.lower() == letter.lower()
if __name__ == '__main__':
    # Load the trained LBPH face-recognition model from disk.
    # NOTE(review): cv2.createLBPHFaceRecognizer()/model.load() is the
    # OpenCV 2.4 API; OpenCV 3+ renamed these to
    # cv2.face.LBPHFaceRecognizer_create()/model.read() -- confirm the
    # installed OpenCV version before upgrading.
    print('Loading training data...')
    model = cv2.createLBPHFaceRecognizer()
    model.load(config.Training_Xml)
    print('Training data loaded!')
    # Initialize camera via the project-level config wrapper.
    camera = config.get_camera()
    print('Running box...')
    print('Press Ctrl-C to quit.')
    while True:
        # Poll stdin for the capture command ('c'); non-blocking, so the
        # loop spins until a character arrives.
        if is_letter_input('c'):
            print('Button pressed, looking for face...')
            # Grab a frame.  config.get_camera() is assumed to return an
            # object whose read() yields the image directly (not the
            # (ret, frame) tuple of a raw cv2.VideoCapture) -- verify.
            image = camera.read()
            # Convert to grayscale for detection/recognition.
            # NOTE(review): OpenCV cameras normally deliver BGR frames;
            # COLOR_RGB2GRAY swaps the red/blue channel weights.  Confirm
            # the frame ordering and use COLOR_BGR2GRAY if it is BGR.
            image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
            # Locate exactly one face in the captured frame.
            result = face.detect_single(image)
            if result is None:
                print('Could not detect single face! Check the image in capture.pgm'
                      ' to see what was captured and try again with only one face visible.')
                continue
            x, y, w, h = result
            # Crop to the detected face and resize to the model's input size.
            crop = face.resize(face.crop(image, x, y, w, h))
            # Predict: lower confidence means a closer match for LBPH.
            label, confidence = model.predict(crop)
            if confidence < config.Threshold:
                print('Recognized face!')
            else:
                print('Did not recognize face!')
            print('name = {0} with confidence {1}'.format(label, confidence))
            # Log the recognition attempt to the project database.
            Database_1.data_entry(label)