Example #1
def convert():
	# Create the directory for positive training images if it doesn't exist.
	if not os.path.exists(config.TRAINING_DIR + CAPTURE_DIR):
		os.makedirs(config.TRAINING_DIR + CAPTURE_DIR)
	# Find the largest ID of existing positive images.
	# Start new images after this ID value.
	files = sorted(glob.glob(os.path.join(config.TRAINING_DIR + CAPTURE_DIR, 
		'[0-9][0-9][0-9].pgm')))
	count = 0
	if len(files) > 0:
		# Grab the count from the last filename.
		count = int(files[-1][-7:-4])+1
	for filename in walk_files(RAW_DIR, '*.jpg'):
		image = cv2.imread(filename)
		image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
		# Get coordinates of single face in captured image.
		result = face.detect_single(image)
		if result is None:
			print 'Could not detect single face!'
			continue
		x, y, w, h = result
		# Crop image as close as possible to desired face aspect ratio.
		# Might be smaller if face is near edge of image.
		crop = face.crop(image, x, y, w, h)
		# Save image to file.
		filename = os.path.join(config.TRAINING_DIR + CAPTURE_DIR, '%03d.pgm' % count)
		cv2.imwrite(filename, crop)
		print 'Found face and wrote training image', filename
		count += 1
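All of the examples on this page lean on a small `face` helper module (plus a `config` module) that is not shown. The sketch below is only a rough guess at what those helpers might look like, inferred from how `detect_single`, `crop`, and `resize` are called above; the cascade file name, face dimensions, and detector parameters are assumptions, not the original values.

# Hedged sketch of the assumed face helper module (not the original source).
import cv2

HAAR_FACES = 'haarcascade_frontalface_alt.xml'  # assumed cascade file
FACE_WIDTH = 92                                 # assumed output size
FACE_HEIGHT = 112

haar_faces = cv2.CascadeClassifier(HAAR_FACES)

def detect_single(image):
    """Return (x, y, w, h) for exactly one detected face, or None otherwise."""
    faces = haar_faces.detectMultiScale(image, scaleFactor=1.3, minNeighbors=4,
                                        minSize=(30, 30))
    if len(faces) != 1:
        return None
    return faces[0]

def crop(image, x, y, w, h):
    """Crop the face region, stretching the height toward the target aspect ratio."""
    midy = y + h // 2
    half = int(FACE_HEIGHT / float(FACE_WIDTH) * w) // 2
    y1 = max(0, midy - half)
    y2 = min(image.shape[0] - 1, midy + half)
    return image[y1:y2, x:x + w]

def resize(image):
    """Resize a face crop to the fixed size the recognizer expects."""
    return cv2.resize(image, (FACE_WIDTH, FACE_HEIGHT), interpolation=cv2.INTER_LANCZOS4)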
Example #2
def training(root):
    fn_name = askname()
    camera = config.get_camera()
    if not os.path.exists(config.POSITIVE_DIR):
        os.makedirs(config.POSITIVE_DIR)
    path = os.path.join(config.POSITIVE_DIR, fn_name)
    if not os.path.isdir(path):
        os.mkdir(path)
    files = sorted(glob.glob(os.path.join(path, '[0-9][0-9][0-9].pgm')))
    count = 0
    if len(files) > 0:
        count = int(files[-1][-7:-4]) + 1
    c = 0
    while c < 2:
        image = camera.read()
        # Convert image to grayscale.
        image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
        # Get coordinates of single face in captured image.
        result = face.detect_single(image)
        if result is None:
            print 'Could not detect single face!  Check the image in capture.pgm' \
                  ' to see what was captured and try again with only one face visible.'
            continue
        x, y, w, h = result
        # Crop image as close as possible to desired face aspect ratio.
        # Might be smaller if face is near edge of image.
        crop = face.crop(image, x, y, w, h)
        # Save image to file.
        filename = os.path.join(path, '%03d.pgm' % count)
        cv2.imwrite(filename, crop)
        print 'Found face and wrote training image', filename
        c += 1
        count += 1
    popuptrained()
Example #3
def check():
    """Checks if it can recognize a face. Returns name is successful and None otherwise"""
    print("Looking for face...")
    image = camera.read()
    image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    # Get coordinates of single face in captured image.
    result = face.detect_single(image)
    if result is None:
        print("Could not detect face!")
        return
    x, y, w, h = result
    # Crop and resize image to face.
    crop = face.resize(face.crop(image, x, y, w, h))
    # Test face against models.
    prob = {}
    for file, model in models.items():
        label, confidence = model.predict(crop)
        print('Predicted {0} face with confidence {1} for {2}'.format(
            'POSITIVE' if label == config.POSITIVE_LABEL else 'NEGATIVE',
            confidence, file))
        if label == config.POSITIVE_LABEL:
            prob[file] = confidence
    name = None
    conf = 10000000
    for file, confidence in prob.items():
        print("Checking {0} with {1}".format(file, confidence))
        if confidence < conf:
            conf = confidence
            name = file
    return name
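check() above assumes a module-level camera and a models dictionary mapping a filename to a trained recognizer. A hedged sketch of how that dictionary could be built, assuming one trained XML file per person under a training/ directory (the path and glob pattern are illustrative, not from the original project):

# Hedged sketch of the module-level objects check() relies on.
import glob
import cv2
import config

camera = config.get_camera()
models = {}
for training_file in glob.glob('training/*.xml'):  # assumed location of trained models
    recognizer = cv2.createEigenFaceRecognizer()
    recognizer.load(training_file)
    models[training_file] = recognizer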
Example #4
def face_check():
    #Only run face_check() for a certain amount of time according to the config file
    time_out_start = time.time()
    while time.time() < time_out_start + config.TIME_OUT:
        # Initialize camera.
        camera = cv2.VideoCapture(config.WEBCAM_ID)
        ret1, frame1 = camera.read()
        image = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
        result = face.detect_single(image)
        #Check if any face presents
        if result is None:
            lcd.clear()
            lcd.message('NO FACE DETECTED' + '\n')
            GPIO.output(Relay_Pin, GPIO.LOW)
        else:
            lcd.clear()
            x, y, w, h = result
            crop = face.resize(face.crop(image, x, y, w, h))
            label, confidence = model.predict(crop)
            if label == config.POSITIVE_LABEL and confidence < config.POSITIVE_THRESHOLD:
                lcd.clear()
                lcd.message(str(label) + ' OK. OPEN LOCK!' + '\n' + str(confidence) + '\n')
                GPIO.output(Relay_Pin, GPIO.HIGH)
            else:
                lcd.clear()
                lcd.message(str(label) + ' NOT RECOGNIZED' + '\n' + str(confidence)+ '\n')
                GPIO.output(Relay_Pin, GPIO.LOW)
        camera.release()
Example #5
def convert():
    # Create the directory for positive training images if it doesn't exist.
    if not os.path.exists(config.TRAINING_DIR + CAPTURE_DIR):
        os.makedirs(config.TRAINING_DIR + CAPTURE_DIR)
    # Find the largest ID of existing positive images.
    # Start new images after this ID value.
    files = sorted(glob.glob(os.path.join(config.TRAINING_DIR + CAPTURE_DIR, '[0-9][0-9][0-9].pgm')))
    count = 0
    if len(files) > 0:
        # Grab the count from the last filename.
        count = int(files[-1][-7:-4]) + 1
    for filename in walk_files(RAW_DIR, '*.jpg'):
        image = filename
        image = cv2.imread(image)
        image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
        # Get coordinates of single face in captured image.
        result = face.detect_single(image)
        if result is None:
            print 'Could not detect single face!'
            continue
        x, y, w, h = result
        # Crop image as close as possible to desired face aspect ratio.
        # Might be smaller if face is near edge of image.
        crop = face.crop(image, x, y, w, h)
        # Save image to file.
        filename = os.path.join(config.TRAINING_DIR + CAPTURE_DIR, '%03d.pgm' % count)
        cv2.imwrite(filename, crop)
        print 'Found face and wrote training image', filename
        count += 1
Example #6
def classify(model,model1):
    print "predicting"
    image = camera.read()
    image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    result = face.detect_single(image)
    if result is None:
        print 'Could not detect single face!  Check the image in capture.pgm' \
		  ' to see what was captured and try again with only one face visible.'
        
        return False
    print "Single face detected"
    x, y, w, h = result
    crop = face.resize(face.crop(image, x, y, w, h))
    # Test face against model.
    label, confidence = model.predict(crop)
    label1, confidence1 = model1.predict(crop)
    
    print 'Predicted {0} face with confidence {1} (lower is more confident).'.format('POSITIVE' if label == config.POSITIVE_LABEL else 'NEGATIVE', confidence)
    print 'Predicted {0} face with confidence {1} (lower is more confident).'.format('POSITIVE' if label1 == config.POSITIVE_LABEL else 'NEGATIVE', confidence1)
    if (label == config.POSITIVE_LABEL and confidence < config.POSITIVE_THRESHOLD) or (label1 == config.POSITIVE_LABEL and confidence1 < config.POSITIVE_THRESHOLD):
        print 'Recognized face!'
        '''TO DO'''
        return True
    else:
        print 'Did not recognize face!'
        '''TO DO'''
        return False
Example #7
def admincapture():	
	if request.method == 'GET':
		

		# Prefix for positive training image filenames.
		POSITIVE_FILE_PREFIX = 'positive_'
		camera = config.get_camera()
		# Create the directory for positive training images if it doesn't exist.
		if not os.path.exists(config.POSITIVE_DIR):
			os.makedirs(config.POSITIVE_DIR)
		# Find the largest ID of existing positive images.
		# Start new images after this ID value.
		files = sorted(glob.glob(os.path.join(config.POSITIVE_DIR, 
			POSITIVE_FILE_PREFIX + '[0-9][0-9][0-9].pgm')))
		count = 0
		if len(files) > 0:
			# Grab the count from the last filename.
			count = int(files[-1][-7:-4])+1
		
		while True:
			
			print 'Capturing image...'
			
			image = camera.read()
			# Convert image to grayscale.
			image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
			# Get coordinates of single face in captured image.
			result = face.detect_single(image)
			if result is None:
				print 'Could not detect single face!  Check the image in capture.pgm' \
						  ' to see what was captured and try again with only one face visible.'
				return 'Could not detect single face!  Check the image in capture.pgm' \
						  ' to see what was captured and try again with only one face visible.'
				break
				#continue
			x, y, w, h = result
				# Crop image as close as possible to desired face aspect ratio.
				# Might be smaller if face is near edge of image.
			crop = face.crop(image, x, y, w, h)
				# Save image to file.
			filename = os.path.join(config.POSITIVE_DIR, POSITIVE_FILE_PREFIX + '%03d.pgm' % count)
			cv2.imwrite(filename, crop)
			print 'Found face and wrote training image', filename
			return 'Found face and wrote training image'
			
			if True:
				print"succesful capture"
				delay() 
			if True: 
				return redirect(url_for('/new'))



			#<----*****THis count will need to be edited --> 
			count += 1

			if count >4:
				break
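admincapture() above uses Flask's request, redirect, and url_for, so it is presumably registered as a view on a Flask app. A hedged sketch of the wiring it seems to assume (the app name, URL rule, and port are guesses, not the original configuration):

# Hedged sketch of the Flask app this view function would hang off of.
from flask import Flask, request, redirect, url_for

app = Flask(__name__)

# Register the view shown above under an assumed URL rule.
app.add_url_rule('/admincapture', 'admincapture', admincapture, methods=['GET'])

if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000)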
Example #8
def main():
    # Load training data into model
    print 'Loading training data...'
    model = cv2.createEigenFaceRecognizer()
    model.load(config.TRAINING_FILE)
    print 'Training data loaded!'
    # Initialize camera and box.
    camera = config.get_camera()
    door = hardware.Door()
    # Move box to locked position.
    door.lock()
    print 'Running Lock...'
    print 'Press button to lock (if unlocked), or unlock if the correct face is detected.'
    print 'Press Ctrl-C to quit.'
    while True:
        try:
            # Check if capture should be made.
            # TODO: Check if button is pressed.
            if door.is_button_up() or is_letter_input('l'):
                if not door.is_locked:
                    # Lock the door if it is unlocked
                    door.lock()
                    print 'Door is now locked.'
                else:
                    print 'Button pressed, looking for face...'
                    # Check for the positive face and unlock if found.
                    image = camera.read()
                    # Convert image to grayscale.
                    image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
                    # Get coordinates of single face in captured image.
                    result = face.detect_single(image)
                    if result is None:
                        print 'Could not detect single face!  Check the image in capture.pgm' \
                           ' to see what was captured and try again with only one face visible.'
                        soundChannelC.play(soundC)
                        sleep(.01)
                        continue
                    x, y, w, h = result
                    # Crop and resize image to face.
                    crop = face.resize(face.crop(image, x, y, w, h))
                    # Test face against model.
                    label, confidence = model.predict(crop)
                    print 'Predicted {0} face with confidence {1} (lower is more confident).'.format(
                        'POSITIVE' if label == config.POSITIVE_LABEL else
                        'NEGATIVE', confidence)
                    if label == config.POSITIVE_LABEL and confidence < config.POSITIVE_THRESHOLD:
                        print 'Recognized face! Unlocking Door Now...'
                        door.unlock()
                        soundChannelA.play(soundA)
                        sleep(.01)
                    else:
                        print 'Did not recognize face!'
                        soundChannelB.play(soundB)
                        sleep(.01)
        except KeyboardInterrupt:
            door.clean()
            sys.exit()
Example #9
def main():
	# Load training data into model
	print 'Loading training data...'
	model = cv2.createEigenFaceRecognizer()
	model.load(config.TRAINING_FILE)
	print 'Training data loaded!'
	# Initialize camera and box.
	camera = config.get_camera()
	door = hardware.Door()
	# Move box to locked position.
	door.lock()
	print 'Running Lock...'
	print 'Press button to lock (if unlocked), or unlock if the correct face is detected.'
	print 'Press Ctrl-C to quit.'
	while True:
		try:
			# Check if capture should be made.
			# TODO: Check if button is pressed.
			if door.is_button_up() or is_letter_input('l'):
				if not door.is_locked:
					# Lock the door if it is unlocked
					door.lock()
					print 'Door is now locked.'
				else:
					print 'Button pressed, looking for face...'
					# Check for the positive face and unlock if found.
					image = camera.read()
					# Convert image to grayscale.
					image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
					# Get coordinates of single face in captured image.
					result = face.detect_single(image)
					if result is None:
						print 'Could not detect single face!  Check the image in capture.pgm' \
							  ' to see what was captured and try again with only one face visible.'
						soundChannelC.play(soundC)
						sleep(.01)
						continue
					x, y, w, h = result
					# Crop and resize image to face.
					crop = face.resize(face.crop(image, x, y, w, h))
					# Test face against model.
					label, confidence = model.predict(crop)
					print 'Predicted {0} face with confidence {1} (lower is more confident).'.format(
						'POSITIVE' if label == config.POSITIVE_LABEL else 'NEGATIVE', 
						confidence)
					if label == config.POSITIVE_LABEL and confidence < config.POSITIVE_THRESHOLD:
						print 'Recognized face! Unlocking Door Now...'
						door.unlock()
						soundChannelA.play(soundA)
						sleep(.01)
					else:
						print 'Did not recognize face!'
						soundChannelB.play(soundB)
						sleep(.01)
		except KeyboardInterrupt:
			door.clean()
			sys.exit()
Example #10
def comp():
	datab = request.form['comp']
	#print datab 
	#open image path to save after decode
	inital = open("limage/attempt.jpg", "wb")
	inital.write(datab.decode('base64'))
	inital.close()
	#open image and convert to pgm format
	second = Image.open('limage/attempt.jpg')
	second = second.convert('RGB')
	second.save('limage/attempt.pgm')

	print 'Loading training data...'
	#initialize opencv facerecognizer class
	model = cv2.createEigenFaceRecognizer()
	#loads xml training file created by train.py
	model.load(config.TRAINING_FILE)
	print 'Training data loaded!'
	print 'Capturing Profile...'
	#start loop to process users image 
	while True:
		#read in converted pgm image and change to grayscale
		third= cv2.imread('limage/attempt.pgm')
		#print type(third)
		compare = cv2.cvtColor(third,cv2.COLOR_RGB2GRAY)
		#run face detect cv process
		result = face.detect_single(compare)
		if result is None:
				print 'Could not detect one face!'
				#return "User Not Detected"
				flash("User Not Detected! Please retake image", 'danger')
				return render_template('facelogin.html')
				break
		x, y, w, h = result
		# Crop and resize image to face.
		crop = face.resize(face.crop(compare, x, y, w, h))
		#write debug image after crop and resize performed
		cv2.imwrite('limage/debug.pgm',crop)
	
		#read cropped image for model to process--prevents wrong shape matrices error
		final = cv2.imread('limage/debug.pgm',0)
		# Test user face against model
		label, confidence = model.predict(final)
	print 'Predicted {0} face with confidence {1} (lower is more confident).'.format(
					'POSITIVE' if label == config.POSITIVE_LABEL else 'NEGATIVE', 
					confidence)
		#if confidence level is less than set threshold in config.py user is accepted
		if label == config.POSITIVE_LABEL and confidence < config.POSITIVE_THRESHOLD:
			#return 'Accepted User'
			flash("User Accepted", 'success')
			return render_template('facelogin.html')
		#user is denied if confidence level is greater than set threshold in config.py	
		else:
			print 'Did not recognize user!'
			#return 'User Not Accepted !'
			flash("User Not Accepted!", 'danger')
			return render_template('facelogin.html')
Example #11
    def detect_face(self):
        # Check for the positive face and unlock if found.
        image = self.camera.read()
        # Convert image to grayscale.
        image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
        # Get coordinates of single face in captured image.
        result = face.detect_single(image)
 
        #x, y, w, h = result
        #print(result)
        return result
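detect_face() simply forwards whatever face.detect_single() returns, so callers only need to test for None. A hedged usage sketch (the scanner object holding the camera is an assumption):

# Hedged usage sketch for the detect_face() method above.
result = scanner.detect_face()
if result is None:
    print('No single face visible.')
else:
    x, y, w, h = result
    print('Face found at x=%d y=%d (w=%d h=%d)' % (x, y, w, h))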
Example #12
def admincapture():	
	
	data = request.form['mydata']
	#print data 
	#need to decode data here
	#open image path to save after decode
	img = open("userimg/capture.jpg", "wb")
	img.write(data.decode('base64'))
	img.close()
	#open image and convert to pgm format
	im = Image.open('userimg/capture.jpg')
	im = im.convert('RGB')
	im.save('userimg/capture.pgm')

	# Prefix for positive training image files
	POSITIVE_FILE_PREFIX = 'positive_'
	# Create the directory for positive training images if it doesn't exist.
	if not os.path.exists(config.POSITIVE_DIR):
			os.makedirs(config.POSITIVE_DIR)
	# Find the largest ID of existing positive images.
	# Start new images after this ID value.
	files = sorted(glob.glob(os.path.join(config.POSITIVE_DIR, 
		POSITIVE_FILE_PREFIX + '[0-9][0-9][0-9].pgm')))
	count = 0
	if len(files) > 0:
		# Grab the count from the last filename.
		count = int(files[-1][-7:-4])+1

	while True:
		xx= cv2.imread('userimg/capture.pgm')
		print type(xx)
		yy = cv2.cvtColor(xx,cv2.COLOR_RGB2GRAY)
		cv2.imwrite('userimg/capture2.pgm',yy) 

		image = cv2.imread('userimg/capture2.pgm')
		result = face.detect_single(image)
		if result is None:
			print 'not detected'
			flash("User Profile Not Detected",'danger')
			#return 'not detected'
			return render_template("capture.html")
		break
	x, y, w, h = result
	crop = face.crop(image, x, y, w, h)
	filename = os.path.join(config.POSITIVE_DIR, POSITIVE_FILE_PREFIX + '%03d.pgm' % count)
	cv2.imwrite(filename,crop)
	if True:
		print 'Captured successfully!'
		flash("Profile Captured Successfully", 'success')
		time.sleep(3)
		#return 'capture sucess'
		return render_template("capture.html")
Example #13
def main(argv):
    pid = int(sys.argv[1])
    print 'PID is: ', pid

    # Load training data into model
    print 'Loading training data...'
    model = cv2.createEigenFaceRecognizer()
    print 'Model created'
    model.load(config.TRAINING_FILE)
    print 'Training data loaded!'
    # Initialize camera and box.
    camera = config.get_camera()

    print 'Press Ctrl-C to quit.'
    goodpicture = False;
    while goodpicture == False:
        print 'Looking for face...'
        print 'Check for the positive face and unlock if found.'
        image = camera.read()

        print 'Convert image to grayscale.'
        image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)

        print 'Get coordinates of single face in captured image.'
        result = face.detect_single(image)

        if result is None:
            print 'Could not detect single face!  Check the image in capture.pgm to see what was captured and try again with only one face visible.'
            #continue
        else: goodpicture = True;
    x, y, w, h = result
    print 'Crop and resize image to face.'
    crop = face.resize(face.crop(image, x, y, w, h))
    print 'Test face against model.'
    label, confidence = model.predict(crop)
    print 'Predicted {0} face with confidence {1} (lower is more confident).'.format(
        'POSITIVE' if label == config.POSITIVE_LABEL else 'NEGATIVE',
        confidence)

    print 'Starting to print in file'
    fo = open("foo.txt", "wr")

    if label == config.POSITIVE_LABEL and confidence < config.POSITIVE_THRESHOLD:
        print 'Recognized face!'
        fo.write("recognized")
    else:
        print 'Did not recognize face!'
        fo.write("echec")

    fo.close()
    os.kill(pid, signal.SIGUSR2)
Example #14
def convertImage(file):
    print('Converting image...')
    tmpimage = cv2.imread(file, cv2.IMREAD_GRAYSCALE)
    # Get coordinates of single face in captured image.
    tmpresult = face.detect_single(tmpimage)
    if tmpresult is None:
        print('Could not detect single face! File: ' + file)
        return None

    x, y, w, h = tmpresult
    # Crop image as close as possible to desired face aspect ratio.
    # Might be smaller if face is near edge of image.
    crop = face.crop(tmpimage, x, y, w, h)
    return crop
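convertImage() returns either a cropped face or None, which makes it easy to batch over a folder of raw captures. A hedged driver sketch (RAW_DIR and OUT_DIR are assumed example paths, not from the original project):

# Hedged usage sketch for convertImage(); paths are placeholders.
import glob
import os
import cv2

RAW_DIR = 'raw'
OUT_DIR = 'converted'
if not os.path.exists(OUT_DIR):
    os.makedirs(OUT_DIR)
for count, path in enumerate(sorted(glob.glob(os.path.join(RAW_DIR, '*.jpg')))):
    crop = convertImage(path)
    if crop is None:
        continue  # no single face found in this image
    cv2.imwrite(os.path.join(OUT_DIR, '%03d.pgm' % count), crop)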
Example #15
	def ImageCapture(cls,ID):
		labels=[]
		images=[]
		# make sure this is the right file name
		faceCascade = cv2.CascadeClassifier(cascadePath)

		counter=0
		#counter2=0
		foldername=ID;
		if not os.path.exists(foldername):
			os.makedirs(foldername)

		name=foldername+"/Images"
		camera=PiCamera()
		camera.resolution=(320,240)
		camera.framerate=32
		rawCapture=PiRGBArray(camera,size=(320,240))
		time.sleep(3)

		cv2.namedWindow("Preview")
		camera.capture(rawCapture,format="bgr",use_video_port=True)
		while rawCapture is not None and counter<30:
			image=rawCapture.array
			gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
			result=face.detect_single(gray)
			cv2.imshow("Preview",image)
			if result is None:
				flag=0
				print "could not detect single face. Please retry."
			else:
				x,y,w,h=result
				flag=1
				cv2.rectangle(image, (x, y), (x+w, y+h), (0, 255, 0), 2)
				scaled_byRatio=face.crop(gray,x,y,w,h)
				resized=face.resize(scaled_byRatio)
				print "Saved captured image No."+str(counter)
				counter=counter+1
				filename = name + str(counter) + ".pgm"
				cv2.imwrite(filename,resized)
		        
			rawCapture.truncate(0)
			camera.capture(rawCapture,format="bgr",use_video_port=True)
			key=cv2.waitKey(1)       

		    	
		camera.close()
		cv2.destroyWindow("Preview")
Example #16
    def ImageCapture(cls, ID):
        labels = []
        images = []
        # make sure this is the right file name
        faceCascade = cv2.CascadeClassifier(cascadePath)

        counter = 0
        #counter2=0
        foldername = ID
        if not os.path.exists(foldername):
            os.makedirs(foldername)

        name = foldername + "/Images"
        camera = PiCamera()
        camera.resolution = (320, 240)
        camera.framerate = 32
        rawCapture = PiRGBArray(camera, size=(320, 240))
        time.sleep(3)

        cv2.namedWindow("Preview")
        camera.capture(rawCapture, format="bgr", use_video_port=True)
        while rawCapture is not None and counter < 30:
            image = rawCapture.array
            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            result = face.detect_single(gray)
            cv2.imshow("Preview", image)
            if result is None:
                flag = 0
                print "could not detect single face. Please retry."
            else:
                x, y, w, h = result
                flag = 1
                cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
                scaled_byRatio = face.crop(gray, x, y, w, h)
                resized = face.resize(scaled_byRatio)
                print "Saved captured image No." + str(counter)
                counter = counter + 1
                filename = name + str(counter) + ".pgm"
                cv2.imwrite(filename, resized)

            rawCapture.truncate(0)
            camera.capture(rawCapture, format="bgr", use_video_port=True)
            key = cv2.waitKey(1)

        camera.close()
        cv2.destroyWindow("Preview")
Example #17
def scan():
    print("Looking for face...")

    image = camera.read()
    image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    result = face.detect_single(image)

    if result is None:
        print(
            "Could not detect a single face. There may be other faces in view. Check the image in capture.pgm to see what was captured."
        )
        AuthenticateJSON.writeToJSON(False)
        return

    x, y, w, h = result
    crop = face.resize(face.crop(image, x, y, w, h))
    label, confidence = model.predict(crop)

    positiveId = ""

    if label == config.POSITIVE_LABEL:
        positiveId = "POSITIVE"
    else:
        positiveId = "NEGATIVE"

    print(
        'Predicted {0} face with confidence {1} (Lower number is higher confidence).'
        .format(positiveId, confidence))

    if label == config.POSITIVE_LABEL and confidence < config.POSITIVE_THRESHOLD:
        AuthenticateJSON.writeToJSON(True)
        print('Face recognized. Access granted.')
        print(
            'Timeout for 30 seconds commencing -- CHANGE BACK TO 30 AFTER PRESENTATION'
        )
        print(
            'This will allow the user to stay "authenticated" on the webserver'
        )
        print('Goodbye')
        time.sleep(10)
        AuthenticateJSON.writeToJSON(False)

    else:
        AuthenticateJSON.writeToJSON(False)
        print('Face was unrecognized. Access denied.')
Example #18
def comp():
	#import cv2
	#import config
	#import face
	# Load training data into model
	print 'Loading training data...'
	model = cv2.createEigenFaceRecognizer()
	model.load(config.TRAINING_FILE)
	print 'Training data loaded!'
	# Initialize camera.
	camera = config.get_camera()
	print 'Capturing Profile...'
	
	while True:
			image = camera.read()
				# Convert image to grayscale.
			image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
				# Get coordinates of single face in captured image.
			result = face.detect_single(image)
			if result is None:
				print 'Could not detect one face!  Check the image in capture.pgm'
				return "User Not Detected"
				
				break
			x, y, w, h = result
				# Crop and resize image to face.
			crop = face.resize(face.crop(image, x, y, w, h))
				# Test face against model.
			label, confidence = model.predict(crop)
			print 'Predicted {0} face with confidence {1} (lower is more confident).'.format(
					'POSITIVE' if label == config.POSITIVE_LABEL else 'NEGATIVE', 
					confidence)
			#user_login for the redirect refers to the def user_login not /user_login
			
			#return redirect(url_for('user_login'))

			if label == config.POSITIVE_LABEL and confidence < config.POSITIVE_THRESHOLD:
				
					
				break
					
			else:
				print 'Did not recognize face!'
				return 'User Not Accepted !'
Example #19
def captureImage():
	print('Capturing image...')
	image = camera.read()
	# Convert image to grayscale.
	image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
	# Get coordinates of single face in captured image.
	result = face.detect_single(image)
	if result is None:
		print('Could not detect single face!  Check the image in capture.pgm' \
				' to see what was captured and try again with only one face visible.')
		return
	x, y, w, h = result
	# Crop image as close as possible to desired face aspect ratio.
	# Might be smaller if face is near edge of image.
	crop = face.crop(image, x, y, w, h)
	# Save image to file.
	filename = os.path.join(serverConfig.IMAGE_DIR, serverConfig.POSITIVE_DIR, POSITIVE_FILE_PREFIX + '%03d.pgm' % count)
	cv2.imwrite(filename, crop)
	print('Found face and wrote training image', filename)
Example #20
    def FaceRec(self):
        while True:

            # Check if capture should be made.
            # TODO: Check if button is pressed.
            if door.is_button_up() or is_letter_input('l'):
                led.LedOn()
                if not door.is_locked:
                    # Lock the door if it is unlocked
                    door.lock()
                    print 'Door is now locked.'
                else:
                    print 'Button pressed, looking for face...'
                    # Check for the positive face and unlock if found.
                    image = camera.read()
                    # Convert image to grayscale.
                    image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
                    # Get coordinates of single face in captured image.
                    result = face.detect_single(image)
                    if result is None:
                        print 'Could not detect single face!  Check the image in capture.pgm' \
                                  ' to see what was captured and try again with only one face visible.'
                        sleep(.01)
                        continue
                    x, y, w, h = result
                    # Crop and resize image to face.
                    crop = face.resize(face.crop(image, x, y, w, h))
                    # Test face against model.
                    label, confidence = model.predict(crop)
                    print 'Predicted {0} face with confidence {1} (lower is more confident).'.format(
                        'POSITIVE' if label == config.POSITIVE_LABEL else
                        'NEGATIVE', confidence)
                    if label == config.POSITIVE_LABEL and confidence < config.POSITIVE_THRESHOLD:
                        print 'Recognized face! Unlocking Door Now...'
                        door.unlock()
                        led.LedOff()
                        self.IRstatus()
                        return self.main()

                    else:
                        print 'Did not recognize face!'
                        sleep(.01)
Example #21
def capture():
    camera = config.get_camera()
    # Create the directory for positive training images if it doesn't exist.
    if not os.path.exists(config.TRAINING_DIR + CAPTURE_DIR):
        os.makedirs(config.TRAINING_DIR + CAPTURE_DIR)
    # Find the largest ID of existing positive images.
    # Start new images after this ID value.
    files = sorted(glob.glob(os.path.join(config.TRAINING_DIR + CAPTURE_DIR, '[0-9][0-9][0-9].pgm')))
    count = 0
    if len(files) > 0:
        # Grab the count from the last filename.
        count = int(files[-1][-7:-4]) + 1
    print 'Capturing positive training images.'
    print 'Press enter to capture an image.'
    print 'Press Ctrl-C to quit.'
    while True:
        try:
            raw_input()
            print 'Capturing image...'
            image = camera.read()
            # Convert image to grayscale.
            image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
            # Get coordinates of single face in captured image.
            result = face.detect_single(image)
            if result is None:
                print 'Could not detect single face!  Check the image in capture.pgm' \
                        ' to see what was captured and try again with only one face visible.'
                filename = os.path.join(config.TRAINING_DIR , 'capture.pgm')
                cv2.imwrite(filename, image)
                continue
            x, y, w, h = result
            # Crop image as close as possible to desired face aspect ratio.
            # Might be smaller if face is near edge of image.
            crop = face.crop(image, x, y, w, h)
            # Save image to file.
            filename = os.path.join(config.TRAINING_DIR + CAPTURE_DIR, '%03d.pgm' % count)
            cv2.imwrite(filename, crop)
            print 'Found face and wrote training image', filename
            count += 1
        except KeyboardInterrupt:
            camera.stop()
            break
Example #22
def recognizeFace(model):

    # Initialize camera and box.
    camera = serverConfig.get_camera()
    # Check for the positive face and unlock if found.
    print("Trying to read an image from the camera.")
    image = camera.read()
    # Convert image to grayscale.
    print("Converting image to greyscale.")
    image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    cv2.equalizeHist(image, image)
    # Get coordinates of single face in captured image.
    print("Trying to detect a single face.")
    result = face.detect_single(image)

    if result is None:
        print('Could not detect single face!  Check the image in capture.pgm' \
        ' to see what was captured and try again with only one face visible.')
        return 'NoFace'

    x, y, w, h = result
    # Crop and resize image to face.
    crop = face.resize(face.crop(image, x, y, w, h))
    # Test face against model.
    label, confidence = model.predict(crop)

    print(label)
    print(confidence)

    if label == serverConfig.NEGATIVE_LABEL:
        return 'Neg'
    else:
        for i in range(len(serverConfig.USERS)):
            if label == serverConfig.POSITIVE_LABELS[i] and confidence < serverConfig.POSITIVE_THRESHOLD:
                print('Found a match')
                return serverConfig.USERS[i]
        # Must not be a match.
        print('No Match')
        return 'Neg'
Example #23
def capture_positive(num, name):
    """Capture one positive image. Return 1 if successful and 0 otherwise"""
    path = config.POSITIVE_DIR + "/" + name
    print('Capturing image...')
    image = camera.read()
    # Convert image to grayscale.
    image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    # Get coordinates of single face in captured image.
    result = face.detect_single(image)
    if result is None:
        print('Could not detect single face!')
        return 0
    x, y, w, h = result
    # Crop image as close as possible to desired face aspect ratio.
    # Might be smaller if face is near edge of image.
    crop = face.crop(image, x, y, w, h)
    # Save image to file.
    filename = os.path.join(path,
                            const.POSITIVE_FILE_PREFIX + '%03d.pgm' % num)
    cv2.imwrite(filename, crop)
    print('Found face and wrote training image', filename)
    return 1
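Since capture_positive() returns 1 on success and 0 otherwise, a caller can keep retrying until it has enough samples. A hedged driver sketch (NUM_TRAINING and the name prompt are assumptions):

# Hedged usage sketch for capture_positive(); NUM_TRAINING is an assumed count.
import os
import config

NUM_TRAINING = 10
name = raw_input('Enter the user name to train: ')
user_dir = os.path.join(config.POSITIVE_DIR, name)
if not os.path.exists(user_dir):
    os.makedirs(user_dir)
captured = 0
while captured < NUM_TRAINING:
    captured += capture_positive(captured, name)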
Example #24
def capture1():
        camera = config.get_camera()

        # Create the directory for positive training images if it doesn't exist.
        if not os.path.exists(config.POSITIVE_DIR1):
                os.makedirs(config.POSITIVE_DIR1)
        # Find the largest ID of existing positive images.
        # Start new images after this ID value.
        files = sorted(glob.glob(os.path.join(config.POSITIVE_DIR1,
                POSITIVE_FILE_PREFIX + '[0-9][0-9][0-9].pgm')))
        count = 0
        if len(files) > 0:
                # Grab the count from the last filename.
                count = int(files[-1][-7:-4])+1

        print 'Capturing image...'
        image = camera.read()
        # Convert image to grayscale
        image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
        # Get coordinates of single face in captured image.
        result = face.detect_single(image)
        if result is None:
            print 'Could not detect single face!  Check the image in capture.pgm' \
                      ' to see what was captured and try again with only one face visible.'
            return False

        #if buttons.decideAdd_button():

        x, y, w, h = result
        # Crop image as close as possible to desired face aspect ratio.
        # Might be smaller if face is near edge of image.
        crop = face.crop(image, x, y, w, h)


        # Save image to file.
        filename = os.path.join(config.POSITIVE_DIR1, POSITIVE_FILE_PREFIX + '%03d.pgm' % count)
        cv2.imwrite(filename, crop)
        print 'Found face and wrote training image', filename
        return True
Example #25
    def CapturePic(self):
        files = sorted(
            glob.glob(
                os.path.join(config.POSITIVE_DIR + ID,
                             POSITIVE_FILE_PREFIX + '[0-9][0-9][0-9].pgm')))
        count = 0
        if len(files) > 0:
            # Grab the count from the last filename.

            count = int(files[-1][-7:-4]) + 1
        print 'Capturing positive training images.'
        print 'Press button or type c (and press enter) to capture an image.'
        print 'Press Ctrl-C to quit.'

        while True:
            if door.is_button_up() or is_letter_input('c'):
                led.LedOn()
                print 'Capturing image...'
                image = camera.read()
                # Convert image to grayscale.
                image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
                # Get coordinates of single face in captured image.
                result = face.detect_single(image)
                if result is None:
                    print 'Could not detect single face!  Check the image in capture.pgm' \
                            ' to see what was captured and try again with only one face visible.'
                    continue
                x, y, w, h = result
                # Crop image as close as possible to desired face aspect ratio.
                # Might be smaller if face is near edge of image.
                crop = face.crop(image, x, y, w, h)
                # Save image to file.
                filename = os.path.join(
                    config.POSITIVE_DIR + ID,
                    POSITIVE_FILE_PREFIX + '%03d.pgm' % count)
                cv2.imwrite(filename, crop)
                print 'Found face and wrote training image', filename
                led.LedOff()
                count += 1
Example #26
def capture():
	camera = config.get_camera()
	# Create the directory for positive training images if it doesn't exist.
	if not os.path.exists(config.TRAINING_DIR + CAPTURE_DIR):
		os.makedirs(config.TRAINING_DIR + CAPTURE_DIR)
	# Find the largest ID of existing positive images.
	# Start new images after this ID value.
	files = sorted(glob.glob(os.path.join(config.TRAINING_DIR + CAPTURE_DIR, 
		'[0-9][0-9][0-9].pgm')))
	count = 0
	if len(files) > 0:
		# Grab the count from the last filename.
		count = int(files[-1][-7:-4])+1
	print 'Capturing positive training images.'
	print 'Type c (and press enter) to capture an image.'
	print 'Press Ctrl-C to quit.'
	while True:
		# Check if button was pressed or 'c' was received, then capture image.
		if  is_letter_input('c'):
			print 'Capturing image...'
			image = camera.read()
			# Convert image to grayscale.
			image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
			# Get coordinates of single face in captured image.
			result = face.detect_single(image)
			if result is None:
				print 'Could not detect single face!  Check the image in capture.pgm' \
					  ' to see what was captured and try again with only one face visible.'
				continue
			x, y, w, h = result
			# Crop image as close as possible to desired face aspect ratio.
			# Might be smaller if face is near edge of image.
			crop = face.crop(image, x, y, w, h)
			# Save image to file.
			filename = os.path.join(config.TRAINING_DIR + CAPTURE_DIR, '%03d.pgm' % count)
			cv2.imwrite(filename, crop)
			print 'Found face and wrote training image', filename
			count += 1
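capture() above also depends on an is_letter_input() helper for non-blocking keyboard checks. A hedged sketch of what such a helper might look like on Linux (the poll timeout is an assumption):

# Hedged sketch of the assumed is_letter_input() helper (Linux/stdin only).
import select
import sys

def is_letter_input(letter):
    # Poll stdin briefly so the capture loop keeps spinning.
    ready, _, _ = select.select([sys.stdin], [], [], 0.25)
    if ready:
        return sys.stdin.readline().strip().lower() == letter.lower()
    return False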
Example #27
def scan():
    print("Looking for face...")

    image = camera.read()
    image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    result = face.detect_single(image)

    if result is None:
        print("Could not detect a single face. There may be other faces in view. Check the image in capture.pgm to see what was captured.")
        AuthenticateJSON.writeToJSON(False)
        return

    x, y, w, h = result
    crop = face.resize(face.crop(image, x, y, w, h))
    label, confidence = model.predict(crop)
            
    positiveId = ""

    if label == config.POSITIVE_LABEL:
        positiveId = "POSITIVE"
    else:
        positiveId = "NEGATIVE"

    print('Predicted {0} face with confidence {1} (Lower number is higher confidence).'.format(positiveId, confidence))

    if label == config.POSITIVE_LABEL and confidence < config.POSITIVE_THRESHOLD:
        AuthenticateJSON.writeToJSON(True)
        print('Face recognized. Access granted.')
        print('Timeout for 30 seconds commencing -- CHANGE BACK TO 30 AFTER PRESENTATION')
        print('This will allow the user to stay "authenticated" on the webserver')
        print('Goodbye')
        time.sleep(10)
        AuthenticateJSON.writeToJSON(False)
                    
    else:
        AuthenticateJSON.writeToJSON(False)
        print('Face was unrecognized. Access denied.')
Example #28
	def Authenticate(cls):
		#load lookup table_ ky
		tableName=LOOKUP_FILE
		table=[]
		samples=[]
		#self.load_table(tableName,table,samples)

		# Create window
		cv2.namedWindow("Preview")
		#cv2.namedWindow("Compared")

		# Load training data into model
		print 'Loading training data...'
		model = cv2.createLBPHFaceRecognizer()
		model.load(TRAINING_FILE)
		print 'Training data loaded!'

		confidences=[]
		labels=[]

		camera=PiCamera()
		camera.resolution=(320,240)
		camera.framerate=32
		rawCapture=PiRGBArray(camera,size=(320,240))
		time.sleep(3)

		count=30
		recognition = 0

		print 'Looking for face...'
		camera.capture(rawCapture,format="bgr",use_video_port=True)
		while rawCapture is not None:
			image=rawCapture.array
			gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
			result=face.detect_single(gray)
			cv2.imshow("Preview",image)
			key=cv2.waitKey(1)
			if result is None:
				print "Please face to the camera "
			else:
				x, y, w, h = result
				# Crop and resize image to face
				crop = face.resize(face.crop(gray, x, y, w, h))
				label, confidence = model.predict(crop)
				confidences.append(confidence)
				labels.append(label)
				cv2.waitKey(1)
				count -= 1
			if count<=0:
				break
			rawCapture.truncate(0)
			camera.capture(rawCapture,format="bgr",use_video_port=True)
			
		print "finish capturing faces"
		camera.close()
		cv2.destroyWindow("Preview")


		temp=[]
		i=0
		length=len(labels)
		while length>0:
			if i==0:
				temp.append(labels[length-1])
				i += 1
				length -= 1
			else:
				tempi=0
				while tempi<i:
					if labels[length-1]!=temp[tempi]:
						tempi += 1
					else:
						length -=1
						break
				if tempi == i:
					temp.append(labels[length-1])
					i += 1
				length -= 1

		print "------LABELS:{}".format(labels)
		print "------DIFFERENT LABELS:{}".format(temp)
		print "------NUMBER OF DIFFERENT LABELS:{}".format(i)

		tempi=0
		numoflabel=0
		if i > 5:
			print "could not enter"
			return 0,-1
		else:
			element=temp[tempi]
			while tempi < i:
				tempj=0
				count=0
				while tempj<len(labels):
					if labels[tempj]==temp[tempi]:
						count += 1
					tempj += 1
				if count > numoflabel :
					numoflabel=count
					element=temp[tempi]
				tempi += 1
			print "element is {}, numoflabel is {}".format(element, numoflabel)


		tempi = 0
		con=0
		while tempi < len(labels):
			if labels[tempi]==element:
				con=con+confidences[tempi]
			tempi += 1
		ave=con/numoflabel

		print "mean of confidences is {}".format(ave)
		#print confidences

		# print recognition
		f=open(ENROLLMENT_FILE,'r')
		s=f.readline()
		flag=0
		while s!="":
			index=int(s)
			#print index
			if index==element:
				flag=1
				print "flag TRUE"
				break
			s=f.readline()

		if ave < 52 and flag==1:
			print "authenticated"
			return 1,element
		else:
			print "could not enter"
			return 0,-1
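Authenticate() returns a (flag, label) pair: 1 plus the winning label when the averaged confidence clears the threshold and the label is enrolled, or 0 and -1 otherwise. A hedged sketch of how a caller might react to it (the Authenticator class name and the unlock step are assumptions):

# Hedged usage sketch for the Authenticate() classmethod above.
ok, label = Authenticator.Authenticate()
if ok:
    print('Access granted for user label %d' % label)
    # e.g. drive a relay or servo to unlock the door here
else:
    print('Access denied.')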
Example #29
def LBPHupdate(ID):
	labels=[]
	images=[]
	# make sure this is the right file name
	faceCascade = cv2.CascadeClassifier(cascadePath)
	
	counter=0
	#counter2=0
	foldername=ID;
	if not os.path.exists(foldername):
	    os.makedirs(foldername)

	name=foldername+"/Images"
	camera=PiCamera()
	camera.resolution=(320,240)
	camera.framerate=32
	rawCapture=PiRGBArray(camera,size=(320,240))
	time.sleep(3)

	cv2.namedWindow("Capturing new images")
	camera.capture(rawCapture,format="bgr",use_video_port=True)
	while rawCapture is not None and counter<30:
		image=rawCapture.array
		gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
		result=face.detect_single(gray)
		cv2.imshow("Capturing new images",image)
		if result is None:
			flag=0
			print "could not detect single face. Please retry."
		else:
			x,y,w,h=result
			flag=1
			cv2.rectangle(image, (x, y), (x+w, y+h), (0, 255, 0), 2)
			scaled_byRatio=face.crop(gray,x,y,w,h)
			resized=face.resize(scaled_byRatio)
			print "Saved captured image No."+str(counter)
			counter=counter+1
			filename = name + str(counter) + ".pgm"
			cv2.imwrite(filename,resized)
	        
		rawCapture.truncate(0)
		camera.capture(rawCapture,format="bgr",use_video_port=True)
		key=cv2.waitKey(1)       

	    	
	camera.close()
	cv2.destroyWindow("Capturing new images")


	#update database
	print 'Loading training data...'
	model=cv2.createLBPHFaceRecognizer()
	model.load(TRAINING_FILE)
	print 'Training data loaded!'

	f=open(CSV_FILE,'r+')
	t=open(LOOKUP_FILE,'r+')
	en=open(ENROLLMENT_FILE,'r+')
	#Get label
	f.seek(-10,2)
	s=f.readline()
	#print s
	list=s.split(';')
	num=str(list[1]).split('\n')
	#new label no.
	label=int(num[0])+1
	#print label

	f.seek(0,2)
	t.seek(0,2)
	en.seek(0,2)

	faces=[]
	labels=[]

	DIRECTORY=foldername
	#print DIRECTORY

	SEPARATOR=";"

	for files in os.listdir(DIRECTORY):
	    abs_path="%s\%s"%(DIRECTORY,files)
	    seq=''.join([str(abs_path),str(SEPARATOR),str(label),'\n'])
	    f.write(seq)
	    
	t.write(''.join([str(DIRECTORY),';',abs_path,';\n']));

	en.write(''.join([str(label),'\n']))

	f.close()
	t.close()
	en.close()

	for filename in walk_files(DIRECTORY,'*.pgm'):
	    #print filename
	    faces.append(prepare_image(filename))
	    labels.append(label)

	model.update(np.asarray(faces), np.asarray(labels))
	#print model

	#Save model results
	model.save(TRAINING_FILE)
	print 'Training data saved to',TRAINING_FILE

	print "successfully updated"
Example #30
def matchFace(image, faces):
  # Unpack the single face bounding box returned by face.detect_single().
  x, y, w, h = faces
  # Crop and resize image to face.
  crop = face.resize(face.crop(image, x, y, w, h))
  cv2.imwrite('crop.jpeg', crop)
  # Test face against model.
  label, confidence = model.predict(crop)
  if label == config.POSITIVE_LABEL and confidence < config.POSITIVE_THRESHOLD:
    return config.HIT
  else:
    return config.NO_HIT


if __name__ == '__main__':
  result = config.NO_HIT
  imgPath = sys.argv[1]
  if not imgPath:
    sys.exit(0)
  image = cv2.imread(imgPath)
  if image is None:
    sys.exit(0)
  # Load training data into model
  #model = cv2.createEigenFaceRecognizer()
  #model = cv2.createFisherFaceRecognizer()
  model = cv2.createLBPHFaceRecognizer()
  model.load(config.TRAINING_FILE)
  # Convert image to grayscale.
  image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
  cv2.imwrite('gray.jpeg', image)
  faces = face.detect_single(image)
  if faces is not None:
    result = matchFace(image, faces)
  print result 
Example #31
# Initialize camera and box.
camera = config.get_camera()

while True:
	# Getting image
	image = camera.read()
	gray_image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
	cv2.equalizeHist(gray_image,gray_image)

	gray_image = cv2.resize(gray_image,(160,120))
	# Time calculation
	#t = cv2.getTickCount()

	# Human face detection
	result = face.detect_single(gray_image,config.HAAR_FACES)

	if result is None:
		pass
	else:
	#	print 'Human Face found'
		x, y, w, h = result
		cv2.rectangle(image,(x,y),(x+w,y+h),(255,0,0),2)
		cv2.imwrite(config.DEBUG_IMAGE, image)
		emailer.sendMail(["*****@*****.**"],
        	"Message from Your Scarecrow",
        	"There is someone in the garden",
        	["capture.jpg"])
	

	#t = cv2.getTickCount() - t
Example #32
	def LBPHupdate(cls,ID):
		labels=[]
		images=[]
		# make sure this is the right file name
		faceCascade = cv2.CascadeClassifier(cascadePath)

		cv2.namedWindow("preview")

		vc = cv2.VideoCapture(0) # device ID may not be 0

		counter=0
		#counter2=0
		foldername=ID;
		if not os.path.exists(foldername):
			os.makedirs(foldername)

		name=foldername+"/Images"


		if vc.isOpened(): # try to get the first frame
			rval, frame = vc.read()
			print "opened"
			while rval==False:
				rval,frame=vc.read()
		else:
			print "error"
			rval = False
		# if vc.isOpened(): # try to get the first frame
		#     rval, frame = vc.read()
		#     print "opened"
		# else:
		#     print "error"
		#     rval = False

		while rval and counter<30:
			gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
			#faces =faceCascade.detectMultiScale(gray)
			result=face.detect_single(gray)
			cv2.imshow("preview",frame)
			#for (x, y, w, h) in face:
			if result is None:
				flag=0
				print "could not detect single face. Please retry."
			else:
				x,y,w,h=result
				#print "hello"
				flag=1
				cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
				scaled_byRatio=face.crop(gray,x,y,w,h)
				resized=face.resize(scaled_byRatio)
				#resized=cv2.resize(gray[y:y+h,x:x+w],(200,200),interpolation=cv2.INTER_CUBIC)
				# keyword "interpolation" should not be left out
				cv2.imshow("preview", frame)
				#print "Ready for capture"
				print "Saved captured image No."+str(counter)
				counter=counter+1
				filename = name + str(counter) + ".pgm"
				cv2.imwrite(filename,resized)

			rval, frame = vc.read()
			key=cv2.waitKey(1)


		vc.release()
		cv2.destroyWindow("preview")


		#update database
		print 'Loading training data...'
		model=cv2.createLBPHFaceRecognizer()
		model.load(TRAINING_FILE)
		print 'Training data loaded!'

		f=open(CSV_FILE,'r+')
		t=open(LOOKUP_FILE,'r+')
		en=open(ENROLLMENT_FILE,'r+')
		#Get label
		f.seek(-10,2)
		s=f.readline()
		#print s
		list=s.split(';')
		num=str(list[1]).split('\n')
		#new label no.
		label=int(num[0])+1
		#print label

		f.seek(0,2)
		t.seek(0,2)
		en.seek(0,2)

		faces=[]
		labels=[]

		DIRECTORY=foldername
		#print DIRECTORY

		SEPARATOR=";"

		for files in os.listdir(DIRECTORY):
			abs_path="%s\%s"%(DIRECTORY,files)
			seq=''.join([str(abs_path),str(SEPARATOR),str(label),'\n'])
			f.write(seq)

		t.write(''.join([str(DIRECTORY),';',abs_path,';\n']));

		en.write(''.join([str(label),'\n']))

		f.close()
		t.close()
		en.close()

		for filename in cls.walk_files(DIRECTORY,'*.pgm'):
			#print filename
			faces.append(cls.prepare_image(filename))
			labels.append(label)

		model.update(np.asarray(faces), np.asarray(labels))
		#print model

		#Save model results
		model.save(TRAINING_FILE)
		print 'Training data saved to',TRAINING_FILE

		print "successfully updated"

		shutil.rmtree(foldername)
		return label
Example #33
	def Authenticate(cls):
		#load lookup table_ ky
		tableName=LOOKUP_FILE
		table=[]
		samples=[]
		#self.load_table(tableName,table,samples)

		# Create window
		cv2.namedWindow("Preview")
		#cv2.namedWindow("Compared")

		# Load training data into model
		print 'Loading training data...'
		model = cv2.createLBPHFaceRecognizer()
		model.load(TRAINING_FILE)
		print 'Training data loaded!'

		confidences=[]
		labels=[]

		vc = cv2.VideoCapture(0)
		print 'Looking for face...'
		if vc.isOpened(): # try to get the first frame
			rval, frame = vc.read()
			print "opened"
			while rval==False:
				rval,frame=vc.read()
		else:
			print "error"
			rval = False

		count=30
		recognition=0
		while rval:
			gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
			result=face.detect_single(gray)
			cv2.imshow("Preview",frame)
			key=cv2.waitKey(1)
			if result is None:
				print "Please face to the camera "
			else:
				x, y, w, h = result
				# Crop and resize image to face
				crop = face.resize(face.crop(gray, x, y, w, h))
				label, confidence = model.predict(crop)
				confidences.append(confidence)
				labels.append(label)
				cv2.waitKey(1)
				count -= 1
			if count<=0:
				break
			rval, frame = vc.read()
		print "finish capturing faces"
		vc.release()
		cv2.destroyWindow("Preview")


		temp=[]
		i=0
		length=len(labels)
		while length>0:
			if i==0:
				temp.append(labels[length-1])
				i += 1
				length -= 1
			else:
				tempi=0
				while tempi<i:
					if labels[length-1]!=temp[tempi]:
						tempi += 1
					else:
						length -=1
						break
				if tempi == i:
					temp.append(labels[length-1])
					i += 1
				length -= 1

		#print labels
		#print temp
		#print i

		tempi=0
		numoflabel=0
		if i > 5:
			print "could not enter"
			return 0,-1
		else:
			element=temp[tempi]
			while tempi < i:
				tempj=0
				count=0
				while tempj<len(labels):
					if labels[tempj]==temp[tempi]:
						count += 1
					tempj += 1
				if count > numoflabel :
					numoflabel=count
					element=temp[tempi]
				tempi += 1
			print "element is {}, numoflabel is {}".format(element, numoflabel)


		tempi = 0
		con=0
		while tempi < len(labels):
			if labels[tempi]==element:
				con=con+confidences[tempi]
			tempi += 1
		ave=con/numoflabel

		print "mean of confidences is {}".format(ave)
		#print confidences

		# print recognition
		f=open(ENROLLMENT_FILE,'r')
		s=f.readline()
		flag=0
		while s!="":
			index=int(s)
			#print index
			if index==element:
				flag=1
				print "flag TRUE"
				break
			s=f.readline()

		if ave < 52 and flag==1:
			print "authenticated"
			return 1,element
		else:
			print "could not enter"
			return 0,element
Example #34
    def run(self):

        debut = time.time()
        self.ecrireStatus("INCONNU")
        reconnu = False

        # Load training data into model
        print 'Loading training data...'
        model = cv2.createEigenFaceRecognizer()
        model.load(config.TRAINING_FILE)
        print 'Training data loaded!'

        # Initialize camera.
        camera = cv2.VideoCapture(0)

        print 'Press Ctrl-C to quit.'

        while True:
            if time.time() - debut > 3:
                print reconnu
                reconnu = False
                self.ecrireStatus("INCONNU")
                print "3 secondes passees sans reconnaissance"

            ret, frame = camera.read()

            # Convert image to grayscale.
            image = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)

            # Get coordinates of single face in captured image.
            result = face.detect_single(image)
            if result is None:
                os.system('cls' if os.name == 'nt' else 'clear')
                print 'PAS DE VISAGE'
                continue

            # Draw a rectangle around each detected face.
            for (x, y, w, h) in result:
                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
            # Display the resulting frame
            #cv2.imshow('Video', frame)

            nx = round(w * MagicMirror.coefW) + x
            ny = round(h * MagicMirror.coefH) + y
            nw = w - round(w * 2 * MagicMirror.coefW)
            nh = h - round(h * 2 * MagicMirror.coefH)

            cv2.imshow("Adding faces to traning set...", frame[ny: ny + nh, nx: nx + nw])
            cv2.waitKey(1)

            x, y, w, h = result[0]
            # Crop and resize image to face.
            crop = face.resize(face.crop(image, nx, ny, nw, nh))

            # Test face against model.
            label, confidence = model.predict(crop)

            if label == config.POSITIVE_LABEL and confidence < MagicMirror.confidenceTolerate:
                os.system('cls' if os.name == 'nt' else 'clear')
                print 'OUI'
                print confidence
                print x, " ", nx, " "
                print y, " ", ny, " "
                print w, " ", nw, " "
                print h, " ", nh, " "

                if reconnu == False:
                    print 'OUI'
                    print "changement de status"
                    self.ecrireStatus("Imade")
                    reconnu = True

                debut = time.time()

            else:
                os.system('cls' if os.name == 'nt' else 'clear')
                print 'NON'
                print confidence
                print x, " ", nx, " "
                print y, " ", ny, " "
                print w, " ", nw, " "
                print h, " ", nh, " "
Example #35
    def LBPHupdate(cls, ID):
        labels = []
        images = []
        # make sure this is the right file name
        faceCascade = cv2.CascadeClassifier(cascadePath)

        cv2.namedWindow("preview")

        vc = cv2.VideoCapture(0)  # device ID may not be 0

        counter = 0
        #counter2=0
        foldername = ID
        if not os.path.exists(foldername):
            os.makedirs(foldername)

        name = foldername + "/Images"

        if vc.isOpened():  # try to get the first frame
            rval, frame = vc.read()
            print "opened"
            while rval == False:
                rval, frame = vc.read()
        else:
            print "error"
            rval = False
        # if vc.isOpened(): # try to get the first frame
        #     rval, frame = vc.read()
        #     print "opened"
        # else:
        #     print "error"
        #     rval = False

        while rval and counter < 30:
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            #faces =faceCascade.detectMultiScale(gray)
            result = face.detect_single(gray)
            cv2.imshow("preview", frame)
            #for (x, y, w, h) in face:
            if result is None:
                flag = 0
                print "could not detect single face. Please retry."
            else:
                x, y, w, h = result
                #print "hello"
                flag = 1
                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
                scaled_byRatio = face.crop(gray, x, y, w, h)
                resized = face.resize(scaled_byRatio)
                #resized=cv2.resize(gray[y:y+h,x:x+w],(200,200),interpolation=cv2.INTER_CUBIC)
                # keyword "interpolation" should not be left out
                cv2.imshow("preview", frame)
                #print "Ready for capture"
                print "Saved captured image No." + str(counter)
                counter = counter + 1
                filename = name + str(counter) + ".pgm"
                cv2.imwrite(filename, resized)

            rval, frame = vc.read()
            key = cv2.waitKey(1)

        vc.release()
        cv2.destroyWindow("preview")

        #update database
        print 'Loading training data...'
        model = cv2.createLBPHFaceRecognizer()
        model.load(TRAINING_FILE)
        print 'Training data loaded!'

        f = open(CSV_FILE, 'r+')
        t = open(LOOKUP_FILE, 'r+')
        en = open(ENROLLMENT_FILE, 'r+')
        #Get label
        f.seek(-10, 2)
        s = f.readline()
        #print s
        list = s.split(';')
        num = str(list[1]).split('\n')
        #new label no.
        label = int(num[0]) + 1
        #print label

        f.seek(0, 2)
        t.seek(0, 2)
        en.seek(0, 2)

        faces = []
        labels = []

        DIRECTORY = foldername
        #print DIRECTORY

        SEPARATOR = ";"

        for files in os.listdir(DIRECTORY):
            abs_path = os.path.join(DIRECTORY, files)
            seq = ''.join([str(abs_path), str(SEPARATOR), str(label), '\n'])
            f.write(seq)

        t.write(''.join([str(DIRECTORY), ';', abs_path, ';\n']))

        en.write(''.join([str(label), '\n']))

        f.close()
        t.close()
        en.close()

        for filename in cls.walk_files(DIRECTORY, '*.pgm'):
            #print filename
            faces.append(cls.prepare_image(filename))
            labels.append(label)

        model.update(np.asarray(faces), np.asarray(labels))
        #print model

        #Save model results
        model.save(TRAINING_FILE)
        print 'Training data saved to', TRAINING_FILE

        print "successfully updated"

        shutil.rmtree(foldername)
        return label
Ejemplo n.º 36
0
    def Authenticate(cls):
        #load lookup table_ ky
        tableName = LOOKUP_FILE
        table = []
        samples = []
        #self.load_table(tableName,table,samples)

        # Create window
        cv2.namedWindow("Preview")
        #cv2.namedWindow("Compared")

        # Load training data into model
        print 'Loading training data...'
        model = cv2.createLBPHFaceRecognizer()
        model.load(TRAINING_FILE)
        print 'Training data loaded!'

        confidences = []
        labels = []

        vc = cv2.VideoCapture(0)
        print 'Looking for face...'
        if vc.isOpened():  # try to get the first frame
            rval, frame = vc.read()
            print "opened"
            while rval == False:
                rval, frame = vc.read()
        else:
            print "error"
            rval = False

        count = 30
        recognition = 0
        while rval:
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            result = face.detect_single(gray)
            cv2.imshow("Preview", frame)
            key = cv2.waitKey(1)
            if result is None:
                print "Please face to the camera "
            else:
                x, y, w, h = result
                # Crop and resize image to face
                crop = face.resize(face.crop(gray, x, y, w, h))
                label, confidence = model.predict(crop)
                confidences.append(confidence)
                labels.append(label)
                cv2.waitKey(1)
                count -= 1
            if count <= 0:
                break
            rval, frame = vc.read()
        print "finish capturing faces"
        vc.release()
        cv2.destroyWindow("Preview")

        # Collect the distinct labels seen across the captured frames.
        temp = list(set(labels))
        i = len(temp)

        #print labels
        #print temp
        #print i

        tempi = 0
        numoflabel = 0
        if i > 5:
            print "could not enter"
            return 0, -1
        else:
            element = temp[tempi]
            while tempi < i:
                tempj = 0
                count = 0
                while tempj < len(labels):
                    if labels[tempj] == temp[tempi]:
                        count += 1
                    tempj += 1
                if count > numoflabel:
                    numoflabel = count
                    element = temp[tempi]
                tempi += 1
            print "element is {}, numoflabel is {}".format(element, numoflabel)

        tempi = 0
        con = 0
        while tempi < len(labels):
            if labels[tempi] == element:
                con = con + confidences[tempi]
            tempi += 1
        ave = con / numoflabel

        print "mean of confidences is {}".format(ave)
        #print confidences

        # print recognition
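        # Check that the winning label appears in the enrollment file (i.e. the user is enrolled).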
        f = open(ENROLLMENT_FILE, 'r')
        s = f.readline()
        flag = 0
        while s != "":
            index = int(s)
            #print index
            if index == element:
                flag = 1
                print "flag TRUE"
                break
            s = f.readline()

        if ave < 52 and flag == 1:
            print "authenticated"
            return 1, element
        else:
            print "could not enter"
            return 0, element
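The distinct-label and majority-vote bookkeeping in Authenticate can be expressed more compactly; the helper below is an equivalent sketch (not part of the original example) built on collections.Counter. It returns the winning label and the mean confidence recorded for it, or (None, None) when more than five distinct labels were seen, mirroring the rejection rule above.

from collections import Counter


def vote(labels, confidences):
    # Pick the most frequent label and average the confidences recorded for it.
    if not labels or len(set(labels)) > 5:
        return None, None
    element, numoflabel = Counter(labels).most_common(1)[0]
    ave = sum(c for l, c in zip(labels, confidences) if l == element) / float(numoflabel)
    return element, ave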
Ejemplo n.º 37
0
    # Crop and resize image to face.
    crop = face.resize(face.crop(image, x, y, w, h))
    cv2.imwrite('crop.jpeg', crop)
    # Test face against model.
    label, confidence = model.predict(crop)
    if label == config.POSITIVE_LABEL and confidence < config.POSITIVE_THRESHOLD:
        return config.HIT
    else:
        return config.NO_HIT


if __name__ == '__main__':
    result = config.NO_HIT
    if len(sys.argv) < 2:
        sys.exit(0)
    imgPath = sys.argv[1]
    image = cv2.imread(imgPath)
    if image is None:
        sys.exit(0)
    # Load training data into model
    #model = cv2.createEigenFaceRecognizer()
    #model = cv2.createFisherFaceRecognizer()
    model = cv2.createLBPHFaceRecognizer()
    model.load(config.TRAINING_FILE)
    # Convert image to grayscale.
    image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    cv2.imwrite('gray.jpeg', image)
    faces = face.detect_single(image)
    if faces is not None:
        result = matchFace(image, faces)
    print result
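This snippet reads the image path from sys.argv[1] and prints config.HIT or config.NO_HIT, so it is presumably run with the photo to check as its only argument; the script name below is only a guess:

python match_face.py /path/to/photo.jpg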
Ejemplo n.º 38
0
def main():

    debut = time.time()
    ecrireStatus("INCONNU")
    reconnu = False

    # Load training data into model
    print 'Loading training data...'
    model = cv2.createEigenFaceRecognizer()
    model.load(config.TRAINING_FILE)
    print 'Training data loaded!'

    # Initialize camera and box.
    camera = cv2.VideoCapture(0)

    print 'Press Ctrl-C to quit.'

    while True:
        if time.time() - debut > 3:
            print reconnu
            reconnu = False
            ecrireStatus("INCONNU")
            print "3 secondes passees sans reconnaissance"

        ret, frame = camera.read()

        # Convert image to grayscale.
        image = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)

        # Get coordinates of single face in captured image.
        result = face.detect_single(image)
        if result is None:
            os.system('cls' if os.name == 'nt' else 'clear')
            print 'NO FACE'
            continue

        # Draw a rectangle around the detected face.
        x, y, w, h = result
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        #Display the resulting frame
        #cv2.imshow('Video', frame)

        # Shrink the detected box by a margin on each side before recognition.
        nx = int(round(w * coefW)) + x
        ny = int(round(h * coefH)) + y
        nw = w - int(round(w * 2 * coefW))
        nh = h - int(round(h * 2 * coefH))

        #cv2.imshow("Adding faces to training set...", frame[ny: ny + nh, nx: nx + nw])
        cv2.waitKey(1)

        # Crop and resize image to face.
        crop = face.resize(face.crop(image, nx, ny, nw, nh))

        # Test face against model.
        label, confidence = model.predict(crop)

        if label == config.POSITIVE_LABEL and confidence < confidenceTolerate:
            os.system('cls' if os.name == 'nt' else 'clear')
            print 'YES'
            print confidence
            print x, " ", nx, " "
            print y, " ", ny, " "
            print w, " ", nw, " "
            print h, " ", nh, " "

            if not reconnu:
                print 'YES'
                print "Status change"
                ecrireStatus("Imade")
                reconnu = True

            debut = time.time()

        else:
            os.system('cls' if os.name == 'nt' else 'clear')
            print 'NO'
            print confidence
            print x, " ", nx, " "
            print y, " ", ny, " "
            print w, " ", nw, " "
            print h, " ", nh, " "
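ecrireStatus is not defined in any snippet on this page. Judging from the calls above, it presumably just persists the current status string ("INCONNU" or the recognized user) somewhere the MagicMirror front end can poll. A minimal sketch, assuming a plain text file whose name is a guess:

def ecrireStatus(status):
    # Assumed helper: write the current recognition status to a file
    # polled by the mirror UI. The file name is a placeholder.
    with open('status.txt', 'w') as f:
        f.write(status)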
Ejemplo n.º 39
0
        image = frame.array

        ######################################################################################################################
        if command == config.STATE_DETECT or command == config.STATE_SEARCH:

            # create grayscale version
            grayscale = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            crop = image.copy()

            #cv2.equalizeHist(grayscale, grayscale)
            #faces = cascade.detectMultiScale(grayscale, scaleFactor=1.3, minNeighbors=5, minSize=(30, 30), flags=cv2.cv.CV_HAAR_SCALE_IMAGE)
            #if len(faces)>0:
            #  for i in faces:
            #    x, y, w, h = i
            #    crop = face.crop(image, x, y, w, h)
            result = face.detect_single(grayscale)
            if result is not None:
                x, y, w, h = result
                # Crop and resize image to face.
                crop = face.resize(face.crop(grayscale, x, y, w, h))
                #################################################################################################################
                if ALIVE:
                    if command == config.STATE_SEARCH:

                        rec_result, data_name, confidence = face_recognize(
                            crop)
                        if rec_result:
                            if confidence <= 45:
                                lightLed(config.GREEN_LED_PIN)
                                print 'green'
                                recordTime(data_name)
    def ReC():
        Pinlbl = Label(top, text="Insert The Pin Code", font=myfont, width=16)
        Pinlbl.grid(row=0, column=3)
        Pinlbl.configure(bg='#ff7700')
        pinCode = code.get()
        print("Pin code: ", pinCode)  #Debug

        c.execute("SELECT * FROM RFID WHERE ADMIN = '%s'" %
                  (pinCode))  #PinCode = 711003

        if c.rowcount != 1:
            print("Access Denied, YOU SHALL NOT PASS!")

        else:

            if __name__ == '__main__':
                camera = config.get_camera()

                # Create the directory for positive training images if it doesn't exist.
                if not os.path.exists(config.POSITIVE_DIR):
                    os.makedirs(config.POSITIVE_DIR)
                # Find the largest ID of existing positive images.
                # Start new images after this ID value.
                files = sorted(
                    glob.glob(
                        os.path.join(
                            config.POSITIVE_DIR,
                            POSITIVE_FILE_PREFIX + '[0-9][0-9][0-9].pgm')))
                count = 0
                if len(files) > 0:
                    # Grab the count from the last filename.
                    count = int(files[-1][-7:-4]) + 1
                print 'Capturing positive training images.'

                while True:
                    print 'Capturing image...'
                    Pinlbl = Label(top,
                                   text="Capturing Image",
                                   font=myfont,
                                   width=16)
                    Pinlbl.grid(row=0, column=3)
                    Pinlbl.configure(bg='#ff7700')
                    GPIO.output(LED, GPIO.HIGH)  #Turn on LED
                    image = camera.read()
                    # Convert image to grayscale.
                    image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
                    # Get coordinates of single face in captured image.
                    result = face.detect_single(image)
                    if result is None:
                        print 'Could not detect single face!  Check the image in capture.pgm' \
                        ' to see what was captured and try again with only one face visible.'
                        Pinlbl = Label(top,
                                       text="Waiting Detection Face",
                                       font=myfont,
                                       width=16)
                        Pinlbl.grid(row=0, column=3)
                        Pinlbl.configure(bg='#ff7700')
                        continue
                    x, y, w, h = result
                    # Crop image as close as possible to desired face aspect ratio.
                    # Might be smaller if face is near edge of image.
                    crop = face.crop(image, x, y, w, h)
                    # Save image to file.
                    filename = os.path.join(
                        config.POSITIVE_DIR,
                        POSITIVE_FILE_PREFIX + '%03d.pgm' % count)
                    cv2.imwrite(filename, crop)
                    print 'Found face and wrote training image', filename
                    count += 1
                    Pinlbl = Label(topwin,
                                   text="Face Detected",
                                   font=myfont,
                                   width=16)
                    Pinlbl.grid(row=0, column=3)
                    Pinlbl.configure(bg='#ff7700')
                    GPIO.output(LED, GPIO.LOW)  #Turn off LED
                    break
    def camera():
        if __name__ == '__main__':

            # Load training data into model
            print 'Loading training data...'
            Pinlbl = Label(top2,
                           text="Loading Training Data",
                           font=myfont,
                           width=16)
            Pinlbl.grid(row=0, column=3)
            Pinlbl.configure(bg='#ff7700')
            model = cv2.face.EigenFaceRecognizer_create()
            model.read(config.TRAINING_FILE)
            print 'Training data loaded!'
            # Initialize camera and box.
            camera = config.get_camera()
            # Move box to locked position.
            print 'Camera Running'
            Pinlbl = Label(top2, text="Camera Running", font=myfont, width=16)
            Pinlbl.grid(row=0, column=3)
            Pinlbl.configure(bg='#ff7700')

            while True:
                GPIO.output(LED, GPIO.HIGH)  #Turn on LED

                # Check for the positive face and unlock if found.
                image = camera.read()
                # Convert image to grayscale.
                image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
                # Get coordinates of single face in captured image.
                result = face.detect_single(image)

                if result is None:
                    print 'Could not detect single face!  Check the image in capture.pgm' \
                     ' to see what was captured and try again with only one face visible.'
                    Pinlbl = Label(top2,
                                   text="Waiting Detection Face",
                                   font=myfont,
                                   width=12)
                    Pinlbl.grid(row=0, column=3)
                    Pinlbl.configure(bg='#ff7700')

                else:
                    x, y, w, h = result
                    # Crop and resize image to face.
                    crop = face.resize(face.crop(image, x, y, w, h))
                    # Test face against model.
                    label, confidence = model.predict(crop)
                    print 'Predicted {0} face with confidence {1} (lower is more confident).'.format(
                        'POSITIVE' if label == config.POSITIVE_LABEL else
                        'NEGATIVE', confidence)

                    config.POSITIVE_THRESHOLD = 5000

                    if label == config.POSITIVE_LABEL:
                        print 'Recognized face!'
                        Pinlbl = Label(top2,
                                       text="Recognize Face",
                                       font=myfont,
                                       width=12)
                        Pinlbl.grid(row=0, column=3)
                        Pinlbl.configure(bg='#ff7700')
                        GPIO.output(Relay, GPIO.HIGH)  #Turn on Relay
                        time.sleep(5)  #Wait 5 Seconds
                        GPIO.output(LED, GPIO.LOW)  #Turn off LED
                        GPIO.output(Relay, GPIO.LOW)  #Turn off Relay
                        break

                    else:
                        print 'Did not recognize face!'
                        Pinlbl = Label(top2,
                                       text="Did Not Recognize Face",
                                       font=myfont,
                                       width=16)
                        Pinlbl.grid(row=0, column=3)
                        Pinlbl.configure(bg='#ff7700')
                        GPIO.output(Buzzer, GPIO.HIGH)  #Turn on Buzzer
                        time.sleep(5)
                        GPIO.output(LED, GPIO.LOW)  #Turn off LED
                        break
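The Tkinter/GPIO snippets above assume the RPi.GPIO pins for the LED, relay and buzzer were configured earlier in the script. A minimal setup sketch; the pin numbers are placeholders:

import RPi.GPIO as GPIO

# Placeholder BCM pin numbers; the real script defines its own.
LED = 18
Relay = 23
Buzzer = 24

GPIO.setmode(GPIO.BCM)
for pin in (LED, Relay, Buzzer):
    GPIO.setup(pin, GPIO.OUT, initial=GPIO.LOW)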
def LBPHupdate(ID):
    labels = []
    images = []
    # make sure this is the right file name
    faceCascade = cv2.CascadeClassifier(cascadePath)

    counter = 0
    #counter2=0
    foldername = ID
    if not os.path.exists(foldername):
        os.makedirs(foldername)

    name = foldername + "/Images"
    camera = PiCamera()
    camera.resolution = (320, 240)
    camera.framerate = 32
    rawCapture = PiRGBArray(camera, size=(320, 240))
    time.sleep(3)

    cv2.namedWindow("Capturing new images")
    camera.capture(rawCapture, format="bgr", use_video_port=True)
    while rawCapture is not None and counter < 30:
        image = rawCapture.array
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        result = face.detect_single(gray)
        cv2.imshow("Capturing new images", image)
        if result is None:
            flag = 0
            print "could not detect single face. Please retry."
        else:
            x, y, w, h = result
            flag = 1
            cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
            scaled_byRatio = face.crop(gray, x, y, w, h)
            resized = face.resize(scaled_byRatio)
            print "Saved captured image No." + str(counter)
            counter = counter + 1
            filename = name + str(counter) + ".pgm"
            cv2.imwrite(filename, resized)

        rawCapture.truncate(0)
        camera.capture(rawCapture, format="bgr", use_video_port=True)
        key = cv2.waitKey(1)

    camera.close()
    cv2.destroyWindow("Capturing new images")

    #update database
    print 'Loading training data...'
    model = cv2.createLBPHFaceRecognizer()
    model.load(TRAINING_FILE)
    print 'Training data loaded!'

    f = open(CSV_FILE, 'r+')
    t = open(LOOKUP_FILE, 'r+')
    en = open(ENROLLMENT_FILE, 'r+')
    #Get label
    f.seek(-10, 2)
    s = f.readline()
    #print s
    list = s.split(';')
    num = str(list[1]).split('\n')
    #new label no.
    label = int(num[0]) + 1
    #print label

    f.seek(0, 2)
    t.seek(0, 2)
    en.seek(0, 2)

    faces = []
    labels = []

    DIRECTORY = foldername
    #print DIRECTORY

    SEPARATOR = ";"

    for files in os.listdir(DIRECTORY):
        abs_path = os.path.join(DIRECTORY, files)
        seq = ''.join([str(abs_path), str(SEPARATOR), str(label), '\n'])
        f.write(seq)

    t.write(''.join([str(DIRECTORY), ';', abs_path, ';\n']))

    en.write(''.join([str(label), '\n']))

    f.close()
    t.close()
    en.close()

    for filename in walk_files(DIRECTORY, '*.pgm'):
        #print filename
        faces.append(prepare_image(filename))
        labels.append(label)

    model.update(np.asarray(faces), np.asarray(labels))
    #print model

    #Save model results
    model.save(TRAINING_FILE)
    print 'Training data saved to', TRAINING_FILE

    print "successfully updated"
Ejemplo n.º 43
0
import cv2
import config
import face

print 'Loading training data...'
model = cv2.createEigenFaceRecognizer()
model.load(config.TRAINING_FILE)
print 'Training data loaded!'
# Initialize camera and box.
camera = config.get_camera()
while True:
    print 'Press c to take a photo'
    n = raw_input()
    if n != 'c':
        continue
    image = camera.read()
    image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    result = face.detect_single(image)
    if result is None:
        print 'Could not detect face'
        continue
    x, y, w, h = result
    crop = face.resize(face.crop(image, x, y, w, h))
    label, confidence = model.predict(crop)
    print 'Predicted {0} face with confidence {1} (lower is more confident).'.format(
        'POSITIVE' if label == config.POSITIVE_LABEL else 'NEGATIVE',
        confidence)
    if label == config.POSITIVE_LABEL and confidence < config.POSITIVE_THRESHOLD:
        print 'Recognized face!'
    else:
        print 'Did not recognize face!'
Ejemplo n.º 44
0
import cv2
import time

import config
import face
import camera

model = cv2.createEigenFaceRecognizer()
model.load(config.TRAINING_FILE)

print "Model loaded ..."

while True:
    filename = config.TEST_FILE

    camera.take_pic(filename)

    img = cv2.imread(filename)
    bw_img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    result = face.detect_single(bw_img)

    if result is None:
        print "No face detected ... :("
    else:
        x, y, w, h = result
        crop = face.resize(face.crop(bw_img, x, y, w, h))
        label, confidence = model.predict(crop)
        print "label ... '%s'" % label, "confidence ... '%s'" % confidence

    time.sleep(5)
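camera.take_pic(filename) is another helper not shown here; it apparently grabs one frame and writes it to config.TEST_FILE. A minimal stand-in, assuming a plain cv2.VideoCapture camera (the real module may use picamera instead):

import cv2


def take_pic(filename, device=0):
    # Assumed stand-in: grab a single frame from the camera and save it to disk.
    cap = cv2.VideoCapture(device)
    ok, frame = cap.read()
    cap.release()
    if ok:
        cv2.imwrite(filename, frame)
    return ok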
Ejemplo n.º 45
0
    # Initialize camera and box.
    camera = config.get_camera()
    personPresent = 0

    print "Running Hal 9000 recognition"
    print "Press Ctrl-C to quit."
    while True:
        # Check if capture should be made.
        # TODO: Check if button is pressed.
        # if is_letter_input('c'):
        # Check for the positive face and unlock if found.
        image = camera.read()
        # Convert image to grayscale.
        image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
        # Get coordinates of single face in captured image.
        result = face.detect_single(image)
        if result is None:
            print "No Face"
            time.sleep(10)
            personPresent = 0
            continue
        x, y, w, h = result
        # Crop and resize image to face.
        crop = face.resize(face.crop(image, x, y, w, h))
        # Test face against model.
        label, confidence = model.predict(crop)
        print "Predicted {0} face with confidence {1} (lower is more confident).".format(
            "POSITIVE" if label == config.POSITIVE_LABEL else "NEGATIVE", confidence
        )
        if label == config.POSITIVE_LABEL and confidence < config.POSITIVE_THRESHOLD and personPresent == 0:
            print "Recognized face!"
Ejemplo n.º 46
0
			while rval==False:
				rval,frame=vc.read()
		else:
			print "error"
			rval = False
		# if vc.isOpened(): # try to get the first frame
		#     rval, frame = vc.read()
		#     print "opened"
		# else:
		#     print "error"
		#     rval = False

		while rval and counter<30:
			gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
			#faces =faceCascade.detectMultiScale(gray)
			result=face.detect_single(gray)
			cv2.imshow("preview",frame)
			#for (x, y, w, h) in face:
			if result is None:
				flag=0
				print "could not detect single face. Please retry."
			else:
				x,y,w,h=result
				#print "hello"
				flag=1
				cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
				scaled_byRatio=face.crop(gray,x,y,w,h)
				resized=face.resize(scaled_byRatio)
				#resized=cv2.resize(gray[y:y+h,x:x+w],(200,200),interpolation=cv2.INTER_CUBIC)
				# keyword "interpolation" should not be left out
				cv2.imshow("preview", frame)
Ejemplo n.º 47
0
    def Authenticate(cls):
        #load lookup table_ ky
        tableName = LOOKUP_FILE
        table = []
        samples = []
        #self.load_table(tableName,table,samples)

        # Create window
        cv2.namedWindow("Preview")
        #cv2.namedWindow("Compared")

        # Load training data into model
        print 'Loading training data...'
        model = cv2.createLBPHFaceRecognizer()
        model.load(TRAINING_FILE)
        print 'Training data loaded!'

        confidences = []
        labels = []

        camera = PiCamera()
        camera.resolution = (320, 240)
        camera.framerate = 32
        rawCapture = PiRGBArray(camera, size=(320, 240))
        time.sleep(3)

        count = 30
        recognition = 0

        print 'Looking for face...'
        camera.capture(rawCapture, format="bgr", use_video_port=True)
        while rawCapture is not None:
            image = rawCapture.array
            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            result = face.detect_single(gray)
            cv2.imshow("Preview", image)
            key = cv2.waitKey(1)
            if result is None:
                print "Please face to the camera "
            else:
                x, y, w, h = result
                # Crop and resize image to face
                crop = face.resize(face.crop(gray, x, y, w, h))
                label, confidence = model.predict(crop)
                confidences.append(confidence)
                labels.append(label)
                cv2.waitKey(1)
                count -= 1
            if count <= 0:
                break
            rawCapture.truncate(0)
            camera.capture(rawCapture, format="bgr", use_video_port=True)

        print "finish capturing faces"
        camera.close()
        cv2.destroyWindow("Preview")

        temp = []
        i = 0
        length = len(labels)
        while length > 0:
            if i == 0:
                temp.append(labels[length - 1])
                i += 1
                length -= 1
            else:
                tempi = 0
                while tempi < i:
                    if labels[length - 1] != temp[tempi]:
                        tempi += 1
                    else:
                        length -= 1
                        break
                if tempi == i:
                    temp.append(labels[length - 1])
                    i += 1
                length -= 1

        print "------LABELS:{}".format(labels)
        print "------DIFFERENT LABELS:{}".format(temp)
        print "------NUMBER OF DIFFERENT LABELS:{}".format(i)

        tempi = 0
        numoflabel = 0
        if i > 5:
            print "could not enter"
            return 0, -1
        else:
            element = temp[tempi]
            while tempi < i:
                tempj = 0
                count = 0
                while tempj < len(labels):
                    if labels[tempj] == temp[tempi]:
                        count += 1
                    tempj += 1
                if count > numoflabel:
                    numoflabel = count
                    element = temp[tempi]
                tempi += 1
            print "element is {}, numoflabel is {}".format(element, numoflabel)

        tempi = 0
        con = 0
        while tempi < len(labels):
            if labels[tempi] == element:
                con = con + confidences[tempi]
            tempi += 1
        ave = con / numoflabel

        print "mean of confidences is {}".format(ave)
        #print confidences

        # print recognition
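        # Check that the winning label appears in the enrollment file (i.e. the user is enrolled).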
        f = open(ENROLLMENT_FILE, 'r')
        s = f.readline()
        flag = 0
        while s != "":
            index = int(s)
            #print index
            if index == element:
                flag = 1
                print "flag TRUE"
                break
            s = f.readline()

        if ave < 52 and flag == 1:
            print "authenticated"
            return 1, element
        else:
            print "could not enter"
            return 0, -1
Ejemplo n.º 48
0
def mark_attendance(root):
        flag = 1
        #print "Reading training images...!"
        while flag:
                faces = []
                labels = []
                names = {}
                pos_count = 0
                neg_count = 0
                id = 1
                # Read all positive images
                for (subdirs, dirs, files) in os.walk(config.POSITIVE_DIR):
                      for subdir in dirs:
                        names[id] = subdir
                        subjectpath = os.path.join(config.POSITIVE_DIR, subdir)
                        for filename in os.listdir(subjectpath):
                                path = subjectpath + '/' + filename
                                faces.append(prepare_image(path))
                                label = id
                                labels.append(int(label)) 
                                pos_count += 1
                        id += 1
                # Read all negative images
                for filename in walk_files(config.NEGATIVE_DIR, '*.pgm'):
                        faces.append(prepare_image(filename))
                        labels.append(config.NEGATIVE_LABEL)
                        neg_count += 1

                # Train model
                flag = 0
                model = cv2.createEigenFaceRecognizer()
                model.train(np.asarray(faces), np.asarray(labels))

                # Save model results
                model.save(config.TRAINING_FILE)

                # Save mean and eignface images which summarize the face recognition model.
                mean = model.getMat("mean").reshape(faces[0].shape)
                cv2.imwrite(MEAN_FILE, normalize(mean, 0, 255, dtype=np.uint8))
                eigenvectors = model.getMat("eigenvectors")
                pos_eigenvector = eigenvectors[:,0].reshape(faces[0].shape)
                cv2.imwrite(POSITIVE_EIGENFACE_FILE, normalize(pos_eigenvector, 0, 255, dtype=np.uint8))
                neg_eigenvector = eigenvectors[:,1].reshape(faces[0].shape)
                cv2.imwrite(NEGATIVE_EIGENFACE_FILE, normalize(neg_eigenvector, 0, 255, dtype=np.uint8))
        # Load training data into model
        print 'Loading training data...'
        model = cv2.createEigenFaceRecognizer()
        model.load(config.TRAINING_FILE)
        print 'Training data loaded!'
        # Initialize camera
        camera = config.get_camera()
        c = 0
        while c < 1:
                image = camera.read()
                # Convert image to grayscale.
                image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
                # Get coordinates of single face in captured image.
                result = face.detect_single(image)
                if result is None:
                        print 'Could not detect single face!  Check the image in capture.pgm' \
                                  ' to see what was captured and try again with only one face visible.'
                        continue
                x, y, w, h = result
                # Crop and resize image to face.
                crop = face.resize(face.crop(image, x, y, w, h))
                # Test face against model.
                label, confidence = model.predict(crop)
                if label != config.NEGATIVE_LABEL and confidence < config.POSITIVE_THRESHOLD:
                        
                        tattend=time.asctime(time.localtime(time.time()))
                        c+=1
                        name = names[label]
                        mark(name,tattend)
                        popup(name)
                else:
                        popuperror()
                        c+=1
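mark, popup and popuperror are UI/bookkeeping helpers not shown on this page; mark(name, tattend) presumably appends the recognized name and timestamp to an attendance log. A minimal assumed sketch; the log file name is a guess:

import csv


def mark(name, tattend):
    # Assumed helper: append one attendance record (name, timestamp) to a CSV log.
    with open('attendance.csv', 'ab') as f:
        csv.writer(f).writerow([name, tattend])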