Example #1
def main():
	# Load training data into model
	print 'Loading training data...'
	model = cv2.createEigenFaceRecognizer()
	model.load(config.TRAINING_FILE)
	print 'Training data loaded!'
	# Initialize camera and box.
	camera = config.get_camera()
	door = hardware.Door()
	# Move box to locked position.
	door.lock()
	print 'Running Lock...'
	print 'Press button to lock (if unlocked), or unlock if the correct face is detected.'
	print 'Press Ctrl-C to quit.'
	while True:
		try:
			# Check whether the button was pressed or 'l' was typed.
			if door.is_button_up() or is_letter_input('l'):
				if not door.is_locked:
					# Lock the door if it is unlocked
					door.lock()
					print 'Door is now locked.'
				else:
					print 'Button pressed, looking for face...'
					# Check for the positive face and unlock if found.
					image = camera.read()
					# Convert image to grayscale.
					image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
					# Get coordinates of single face in captured image.
					result = face.detect_single(image)
					if result is None:
						print 'Could not detect single face!  Check the image in capture.pgm' \
							  ' to see what was captured and try again with only one face visible.'
						soundChannelC.play(soundC)
						sleep(.01)
						continue
					x, y, w, h = result
					# Crop and resize image to face.
					crop = face.resize(face.crop(image, x, y, w, h))
					# Test face against model.
					label, confidence = model.predict(crop)
					print 'Predicted {0} face with confidence {1} (lower is more confident).'.format(
						'POSITIVE' if label == config.POSITIVE_LABEL else 'NEGATIVE', 
						confidence)
					if label == config.POSITIVE_LABEL and confidence < config.POSITIVE_THRESHOLD:
						print 'Recognized face! Unlocking Door Now...'
						door.unlock()
						soundChannelA.play(soundA)
						sleep(.01)
					else:
						print 'Did not recognize face!'
						soundChannelB.play(soundB)
						sleep(.01)
		except KeyboardInterrupt:
			door.clean()
			sys.exit()
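
The sound objects used above (soundA/soundChannelA and friends) are assumed to be created elsewhere in this script; a minimal sketch of that setup with pygame.mixer, with placeholder .wav file names:

import pygame

pygame.mixer.init()
# Placeholder sound files -- substitute the project's actual audio clips.
soundA = pygame.mixer.Sound('unlocked.wav')
soundB = pygame.mixer.Sound('denied.wav')
soundC = pygame.mixer.Sound('no_face.wav')
soundChannelA = pygame.mixer.Channel(0)
soundChannelB = pygame.mixer.Channel(1)
soundChannelC = pygame.mixer.Channel(2)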
Example #2
def comp():
	datab = request.form['comp']
	#print datab 
	#decode the base64 image data and save it to disk
	initial = open("limage/attempt.jpg", "wb")
	initial.write(datab.decode('base64'))
	initial.close()
	#open image and convert to pgm format
	second = Image.open('limage/attempt.jpg')
	second = second.convert('RGB')
	second.save('limage/attempt.pgm')

	print 'Loading training data...'
	#initialize the OpenCV face recognizer class
	model = cv2.createEigenFaceRecognizer()
	#load the XML training file created by train.py
	model.load(config.TRAINING_FILE)
	print 'Training data loaded!'
	print 'Capturing Profile...'
	#start loop to process the user's image
	while True:
		#read in converted pgm image and change to grayscale
		third= cv2.imread('limage/attempt.pgm')
		#print type(third)
		compare = cv2.cvtColor(third,cv2.COLOR_RGB2GRAY)
		#run face detect cv process
		result = face.detect_single(compare)
		if result is None:
			print 'Could not detect one face!'
			#return "User Not Detected"
			flash("User Not Detected! Please retake image", 'danger')
			return render_template('facelogin.html')
		x, y, w, h = result
		# Crop and resize image to face.
		crop = face.resize(face.crop(compare, x, y, w, h))
		#write debug image after crop and resize are performed
		cv2.imwrite('limage/debug.pgm',crop)
	
		#read the cropped image back for the model to process--prevents a wrong-shape matrix error
		final = cv2.imread('limage/debug.pgm',0)
		# Test user face against model
		label, confidence = model.predict(final)
		print 'Predicted {0} face with confidence {1} (lower is more confident).'.format(
			'POSITIVE' if label == config.POSITIVE_LABEL else 'NEGATIVE', confidence)
		#if confidence level is less than set threshold in config.py user is accepted
		if label == config.POSITIVE_LABEL and confidence < config.POSITIVE_THRESHOLD:
			#return 'Accepted User'
			flash("User Accepted", 'success')
			return render_template('facelogin.html')
		#user is denied if confidence level is greater than set threshold in config.py	
		else:
			print 'Did not recognize user!'
			#return 'User Not Accepted !'
			flash("User Not Accepted!", 'danger')
			return render_template('facelogin.html')
Example #3
def matchFace(image, coordinate):
  x, y, w, h = coordinate
  # Crop and resize image to face.
  crop = face.resize(face.crop(image, x, y, w, h))
  cv2.imwrite('crop.jpeg', crop)
  # Test face against model.
  label, confidence = model.predict(crop)
  if label == config.POSITIVE_LABEL and confidence < config.POSITIVE_THRESHOLD:
    return config.HIT
  else:
    return config.NO_HIT
def main(argv):
    pid = int(sys.argv[1])
    print 'PID is: ', pid

    # Load training data into model
    print 'Loading training data...'
    model = cv2.createEigenFaceRecognizer()
    print 'Model created'
    model.load(config.TRAINING_FILE)
    print 'Training data loaded!'
    # Initialize camera and box.
    camera = config.get_camera()

    print 'Press Ctrl-C to quit.'
    goodpicture = False
    while not goodpicture:
        print 'Looking for face...'
        print 'Check for the positive face and unlock if found.'
        image = camera.read()

        print 'Convert image to grayscale.'
        image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)

        print 'Get coordinates of single face in captured image.'
        result = face.detect_single(image)

        if result is None:
            print 'Could not detect single face!  Check the image in capture.pgm to see what was captured and try again with only one face visible.'
            #continue
        else:
            goodpicture = True
    x, y, w, h = result
    print 'Crop and resize image to face.'
    crop = face.resize(face.crop(image, x, y, w, h))
    print 'Test face against model.'
    label, confidence = model.predict(crop)
    print 'Predicted {0} face with confidence {1} (lower is more confident).'.format(
        'POSITIVE' if label == config.POSITIVE_LABEL else 'NEGATIVE',
        confidence)

    print 'Starting to print in file'
    fo = open("foo.txt", "w")

    if label == config.POSITIVE_LABEL and confidence < config.POSITIVE_THRESHOLD:
        print 'Recognized face!'
        fo.write("recognized")
    else:
        print 'Did not recognize face!'
        fo.write("echec")

    fo.close()
    os.kill(pid, signal.SIGUSR2)
	def ImageCapture(cls,ID):
		labels=[]
		images=[]
		# make sure this is the right file name
		faceCascade = cv2.CascadeClassifier(cascadePath)

		counter=0
		#counter2=0
		foldername=ID;
		if not os.path.exists(foldername):
			os.makedirs(foldername)

		name=foldername+"/Images"
		camera=PiCamera()
		camera.resolution=(320,240)
		camera.framerate=32
		rawCapture=PiRGBArray(camera,size=(320,240))
		time.sleep(3)

		cv2.namedWindow("Preview")
		camera.capture(rawCapture,format="bgr",use_video_port=True)
		while rawCapture is not None and counter<30:
			image=rawCapture.array
			gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
			result=face.detect_single(gray)
			cv2.imshow("Preview",image)
			if result is None:
				flag=0
				print "could not detect single face. Please retry."
			else:
				x,y,w,h=result
				flag=1
				cv2.rectangle(image, (x, y), (x+w, y+h), (0, 255, 0), 2)
				scaled_byRatio=face.crop(gray,x,y,w,h)
				resized=face.resize(scaled_byRatio)
				print "Saved captured image No."+str(counter)
				counter=counter+1
				filename = name + str(counter) + ".pgm"
				cv2.imwrite(filename,resized)
		        
			rawCapture.truncate(0)
			camera.capture(rawCapture,format="bgr",use_video_port=True)
			key=cv2.waitKey(1)       

		    	
		camera.close()
		cv2.destroyWindow("Preview")
Example #6
def comp():
	#import cv2
	#import config
	#import face
	# Load training data into model
	print 'Loading training data...'
	model = cv2.createEigenFaceRecognizer()
	model.load(config.TRAINING_FILE)
	print 'Training data loaded!'
	# Initialize camera.
	camera = config.get_camera()
	print 'Capturing Profile...'
	
	while True:
			image = camera.read()
			# Convert image to grayscale.
			image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
			# Get coordinates of single face in captured image.
			result = face.detect_single(image)
			if result is None:
				print 'Could not detect one face!  Check the image capture.pgm'
				return "User Not Detected"
			x, y, w, h = result
			# Crop and resize image to face.
			crop = face.resize(face.crop(image, x, y, w, h))
			# Test face against model.
			label, confidence = model.predict(crop)
			print 'Predicted {0} face with confidence {1} (lower is more confident).'.format(
					'POSITIVE' if label == config.POSITIVE_LABEL else 'NEGATIVE', 
					confidence)
			#user_login for the redirect refers to the def user_login not /user_login
			
			#return redirect(url_for('user_login'))

			if label == config.POSITIVE_LABEL and confidence < config.POSITIVE_THRESHOLD:
				break
			else:
				print 'Did not recognize face!'
				return 'User Not Accepted !'
def scan():
    print("Looking for face...")

    image = camera.read()
    image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    result = face.detect_single(image)

    if result is None:
        print("Could not detect a single face. There may be other faces in view. Check the image in capture.pgm to see what was captured.")
        AuthenticateJSON.writeToJSON(False)
        return

    x, y, w, h = result
    crop = face.resize(face.crop(image, x, y, w, h))
    label, confidence = model.predict(crop)
            
    positiveId = ""

    if label == config.POSITIVE_LABEL:
        positiveId = "POSITIVE"
    else:
        positiveId = "NEGATIVE"

    print('Predicted {0} face with confidence {1} (Lower number is higher confidence).'.format(positiveId, confidence))

    if label == config.POSITIVE_LABEL and confidence < config.POSITIVE_THRESHOLD:
        AuthenticateJSON.writeToJSON(True)
        print('Face recognized. Access granted.')
        print('Timeout for 30 seconds commencing -- CHANGE BACK TO 30 AFTER PRESENTATION')
        print('This will allow the user to stay "authenticated" on the webserver.')
        print('Goodbye')
        time.sleep(10)
        AuthenticateJSON.writeToJSON(False)
                    
    else:
        AuthenticateJSON.writeToJSON(False)
        print('Face was unrecognized. Access denied.')
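
The AuthenticateJSON module is not shown in this snippet; a minimal sketch of what its writeToJSON helper might look like, assuming it simply records the authentication state in a JSON file (the file path is a placeholder):

import json

def writeToJSON(authenticated):
    # Persist the authentication state so the web server can pick it up.
    with open('authenticated.json', 'w') as f:
        json.dump({'authenticated': authenticated}, f)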
Example #8
                # Check for the positive face and unlock if found.
                image = camera.read()
                
                # Convert image to grayscale.
                image_gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
                # Get coordinates of single face in captured image.
                result = face.detect_single(image_gray)
                print (result)
                
                if result is None:
                        cv2.imshow('image',image)
                        if cv2.waitKey(1) & 0xFF == ord('q'):
                                break
                        print ('Could not detect single face!  Check the image in capture.pgm' \
                               ' to see what was captured and try again with only one face visible.')
                        continue
                
                x, y, w, h = result
                cv2.rectangle(image, (x, y), (x+w, y+h), (0, 255, 0), 2)                
                cv2.imshow('image',image)                
                # Crop and resize image to face.
                crop = face.resize(face.crop(image_gray, x, y, w, h))
                # Test face against recognizer.
                predicted  = recognizer.predict(crop)                
                if cv2.waitKey(1) & 0xFF == ord('q'):
                        break

				
                
cv2.destroyAllWindows()
            print 'Button pressed, looking for face...'

            # Check for the positive face and unlock if found.
            image = camera.read()

            # Convert image to grayscale.
            image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)

            # Get coordinates of single face in captured image.
            result = face.detect_single(image)

            if result is None:
                print 'Could not detect single face!  Check the image in capture.pgm' \
                   ' to see what was captured and try again with only one face visible.'
                continue
            x, y, w, h = result

            # Crop and resize image to face.
            crop = face.resize(face.crop(image, x, y, w, h))

            # Test face against model.
            label, confidence = model.predict(crop)

            if confidence < config.Threshold:
                print 'Recognized face!'

            else:
                print 'Did not recognize face!'
            print 'name = {0} with confidence {1}'.format(label, confidence)
            Database_1.data_entry(label)
Example #10
    print('Press Ctrl-C to quit.')
    while True:
        # Check for the positive face and unlock if found.
        image = camera.read()

        # Convert image to grayscale.
        image_gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
        # Get coordinates of single face in captured image.
        result = face.detect_single(image_gray)
        print(result)

        if result is None:
            cv2.imshow('image', image)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
            print ('Could not detect single face!  Check the image in capture.pgm' \
                   ' to see what was captured and try again with only one face visible.')
            continue

        x, y, w, h = result
        cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.imshow('image', image)
        # Crop and resize image to face.
        crop = face.resize(face.crop(image_gray, x, y, w, h))
        # Test face against recognizer.
        predicted = recognizer.predict(crop)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

cv2.destroyAllWindows()
Example #11
def prepareImage(fileName):

    return face.resize(cv2.imread(fileName, cv2.IMREAD_GRAYSCALE))
Example #12
def prepareImage(fileName):
    """Read an image as grayscale and resize it to the appropriate size for
    training the face recognition model.
    """
    return face.resize(cv2.imread(fileName, cv2.IMREAD_GRAYSCALE))
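
A minimal sketch of how prepareImage might be used to build a training set, assuming the walk_files helper shown in Example #24 and a placeholder directory of positive capture images:

import numpy as np

faces = []
labels = []
for fileName in walk_files('training/positive', '*.pgm'):  # placeholder path
    faces.append(prepareImage(fileName))
    labels.append(config.POSITIVE_LABEL)
model = cv2.createEigenFaceRecognizer()
model.train(np.asarray(faces), np.asarray(labels))
model.save(config.TRAINING_FILE)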
	def Authenticate(cls):
		#load lookup table
		tableName=LOOKUP_FILE
		table=[]
		samples=[]
		#self.load_table(tableName,table,samples)

		# Create window
		cv2.namedWindow("Preview")
		#cv2.namedWindow("Compared")

		# Load training data into model
		print 'Loading training data...'
		model = cv2.createLBPHFaceRecognizer()
		model.load(TRAINING_FILE)
		print 'Training data loaded!'

		confidences=[]
		labels=[]

		vc = cv2.VideoCapture(0)
		print 'Looking for face...'
		if vc.isOpened(): # try to get the first frame
			rval, frame = vc.read()
			print "opened"
			while rval==False:
				rval,frame=vc.read()
		else:
			print "error"
			rval = False

		count=30
		recognition=0
		while rval:
			gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
			result=face.detect_single(gray)
			cv2.imshow("Preview",frame)
			key=cv2.waitKey(1)
			if result is None:
				print "Please face to the camera "
			else:
				x, y, w, h = result
				# Crop and resize image to face
				crop = face.resize(face.crop(gray, x, y, w, h))
				label, confidence = model.predict(crop)
				confidences.append(confidence)
				labels.append(label)
				cv2.waitKey(1)
				count -= 1
			if count<=0:
				break
			rval, frame = vc.read()
		print "finish capturing faces"
		vc.release()
		cv2.destroyWindow("Preview")


		temp=[]
		i=0
		length=len(labels)
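		# Collect the distinct predicted labels into temp, scanning labels from the end; i counts how many distinct labels were seen.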
		while length>0:
			if i==0:
				temp.append(labels[length-1])
				i += 1
				length -= 1
			else:
				tempi=0
				while tempi<i:
					if labels[length-1]!=temp[tempi]:
						tempi += 1
					else:
						length -=1
						break
				if tempi == i:
					temp.append(labels[length-1])
					i += 1
				length -= 1

		#print labels
		#print temp
		#print i

		tempi=0
		numoflabel=0
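		# Reject if more than 5 distinct labels were predicted; otherwise pick the most frequent label (element) and how often it occurred (numoflabel).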
		if i > 5:
			print "could not enter"
			return 0,-1
		else:
			element=temp[tempi]
			while tempi < i:
				tempj=0
				count=0
				while tempj<len(labels):
					if labels[tempj]==temp[tempi]:
						count += 1
					tempj += 1
				if count > numoflabel :
					numoflabel=count
					element=temp[tempi]
				tempi += 1
			print "element is {}, numoflabel is {}".format(element, numoflabel)


		tempi = 0
		con=0
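		# Average the confidence values of the predictions that matched the winning label.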
		while tempi < len(labels):
			if labels[tempi]==element:
				con=con+confidences[tempi]
			tempi += 1
		ave=con/numoflabel

		print "mean of confidences is {}".format(ave)
		#print confidences

		# Check whether the winning label appears in the enrollment file.
		f=open(ENROLLMENT_FILE,'r')
		s=f.readline()
		flag=0
		while s!="":
			index=int(s)
			#print index
			if index==element:
				flag=1
				print "flag TRUE"
				break
			s=f.readline()

		if ave < 52 and flag==1:
			print "authenticated"
			return 1,element
		else:
			print "could not enter"
			return 0,element
Example #14
            os.system('xdotool windowminimize $(xdotool getactivewindow)')
            count = count + 1

        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        cv2.imshow('Video', frame)
        if cv2.waitKey(1) & flag == True:
            result = face.detect_single(gray)

            if result is None:
                print 'No face!'
                lock.lock()

            if result is not None:
                x, y, w, h = result
                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
                crop = face.resize(face.crop(gray, x, y, w, h))

                facecrop = gray[y:y + h, x:x + w]
                print 'Image captured successfully!'

    if result is None:
        print 'Could not detect single face!'

    else:

        print 'Analysing image....'
        model = cv2.createEigenFaceRecognizer()
        model.load(config.TRAINING_FILE)

        label, confidence = model.predict(crop)
        print 'Predicted {0} face with confidence {1} (lower is more confident).'.format(
            'POSITIVE' if label == config.POSITIVE_LABEL else 'NEGATIVE', confidence)
	def Authenticate(cls):
		#load lookup table
		tableName=LOOKUP_FILE
		table=[]
		samples=[]
		#self.load_table(tableName,table,samples)

		# Create window
		cv2.namedWindow("Preview")
		#cv2.namedWindow("Compared")

		# Load training data into model
		print 'Loading training data...'
		model = cv2.createLBPHFaceRecognizer()
		model.load(TRAINING_FILE)
		print 'Training data loaded!'

		confidences=[]
		labels=[]

		camera=PiCamera()
		camera.resolution=(320,240)
		camera.framerate=32
		rawCapture=PiRGBArray(camera,size=(320,240))
		time.sleep(3)

		count=30
		recognition=0

		print 'Looking for face...'
		camera.capture(rawCapture,format="bgr",use_video_port=True)
		while rawCapture is not None:
			image=rawCapture.array
			gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
			result=face.detect_single(gray)
			cv2.imshow("Preview",image)
			key=cv2.waitKey(1)
			if result is None:
				print "Please face to the camera "
			else:
				x, y, w, h = result
				# Crop and resize image to face
				crop = face.resize(face.crop(gray, x, y, w, h))
				label, confidence = model.predict(crop)
				confidences.append(confidence)
				labels.append(label)
				cv2.waitKey(1)
				count -= 1
			if count<=0:
				break
			rawCapture.truncate(0)
			camera.capture(rawCapture,format="bgr",use_video_port=True)
			
		print "finish capturing faces"
		camera.close()
		cv2.destroyWindow("Preview")


		temp=[]
		i=0
		length=len(labels)
		while length>0:
			if i==0:
				temp.append(labels[length-1])
				i += 1
				length -= 1
			else:
				tempi=0
				while tempi<i:
					if labels[length-1]!=temp[tempi]:
						tempi += 1
					else:
						length -=1
						break
				if tempi == i:
					temp.append(labels[length-1])
					i += 1
				length -= 1

		print "------LABELS:{}".format(labels)
		print "------DIFFERENT LABELS:{}".format(temp)
		print "------NUMBER OF DIFFERENT LABELS:{}".format(i)

		tempi=0
		numoflabel=0
		if i > 5:
			print "could not enter"
			return 0,-1
		else:
			element=temp[tempi]
			while tempi < i:
				tempj=0
				count=0
				while tempj<len(labels):
					if labels[tempj]==temp[tempi]:
						count += 1
					tempj += 1
				if count > numoflabel :
					numoflabel=count
					element=temp[tempi]
				tempi += 1
			print "element is {}, numoflabel is {}".format(element, numoflabel)


		tempi = 0
		con=0
		while tempi < len(labels):
			if labels[tempi]==element:
				con=con+confidences[tempi]
			tempi += 1
		ave=con/numoflabel

		print "mean of confidences is {}".format(ave)
		#print confidences

		# Check whether the winning label appears in the enrollment file.
		f=open(ENROLLMENT_FILE,'r')
		s=f.readline()
		flag=0
		while s!="":
			index=int(s)
			#print index
			if index==element:
				flag=1
				print "flag TRUE"
				break
			s=f.readline()

		if ave < 52 and flag==1:
			print "authenticated"
			return 1,element
		else:
			print "could not enter"
			return 0,-1
def LBPHupdate(ID):
    labels = []
    images = []
    # make sure this is the right file name
    faceCascade = cv2.CascadeClassifier(cascadePath)

    cv2.namedWindow("preview")
    vc = cv2.VideoCapture(1)  # device ID may not be 0
    counter = 0
    #counter2=0
    foldername = ID
    if not os.path.exists(foldername):
        os.makedirs(foldername)

    name = foldername + "/Images"

    if vc.isOpened():  # try to get the first frame
        rval, frame = vc.read()
        print "opened"
        while rval == False:
            rval, frame = vc.read()
    else:
        print "error"
        rval = False
    # if vc.isOpened(): # try to get the first frame
    #     rval, frame = vc.read()
    #     print "opened"
    # else:
    #     print "error"
    #     rval = False

    while rval and counter < 30:
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        #faces =faceCascade.detectMultiScale(gray)
        result = face.detect_single(gray)
        cv2.imshow("preview", frame)
        #for (x, y, w, h) in face:
        if result is None:
            flag = 0
            print "could not detect single face. Please retry."
        else:
            x, y, w, h = result
            #print "hello"
            flag = 1
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
            scaled_byRatio = face.crop(gray, x, y, w, h)
            resized = face.resize(scaled_byRatio)
            #resized=cv2.resize(gray[y:y+h,x:x+w],(200,200),interpolation=cv2.INTER_CUBIC)
            # keyword "interpolation" should not be left out
            cv2.imshow("preview", frame)
            #print "Ready for capture"
            print "Saved captured image No." + str(counter)
            counter = counter + 1
            filename = name + str(counter) + ".pgm"
            cv2.imwrite(filename, resized)

        rval, frame = vc.read()
        key = cv2.waitKey(1)

    vc.release()
    cv2.destroyWindow("preview")

    #update database
    print 'Loading training data...'
    model = cv2.createLBPHFaceRecognizer()
    model.load(TRAINING_FILE)
    print 'Training data loaded!'

    f = open(CSV_FILE, 'r+')
    t = open(LOOKUP_FILE, 'r+')
    en = open(ENROLLMENT_FILE, 'r+')
    #Get label
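    # The last CSV line has the form 'path;label'; read its label and use label + 1 for the new user.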
    f.seek(-10, 2)
    s = f.readline()
    #print s
    list = s.split(';')
    num = str(list[1]).split('\n')
    #new label no.
    label = int(num[0]) + 1
    #print label

    f.seek(0, 2)
    t.seek(0, 2)
    en.seek(0, 2)

    faces = []
    labels = []

    DIRECTORY = foldername
    #print DIRECTORY

    SEPARATOR = ";"

    for files in os.listdir(DIRECTORY):
        abs_path = "%s\%s" % (DIRECTORY, files)
        seq = ''.join([str(abs_path), str(SEPARATOR), str(label), '\n'])
        f.write(seq)

    t.write(''.join([str(DIRECTORY), ';', abs_path, ';\n']))

    en.write(''.join([str(label), '\n']))

    f.close()
    t.close()
    en.close()

    for filename in walk_files(DIRECTORY, '*.pgm'):
        #print filename
        faces.append(prepare_image(filename))
        labels.append(label)

    model.update(np.asarray(faces), np.asarray(labels))
    #print model

    #Save model results
    model.save(TRAINING_FILE)
    print 'Training data saved to', TRAINING_FILE

    print "successfully updated"

    shutil.rmtree(foldername)
Example #18
    def update(self, observable, arg):
        print "UI sees change" , observable.currentState

        if observable.currentState.authState == AuthStates.Assess:
            self.ledsOff()
            GPIO.output(lightPins, 1)
            image = camera.read()
            # Convert image to grayscale.
            image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)

            result = face.detect_single(image)
            if result is None:
                print 'Could not detect single face!  Check the image in capture.pgm' \
                        ' to see what was captured and try again with only one face visible.'              	
                self.playAudio( sorryAudioFile )
                self.controllable.failAuth()
            else:
                GPIO.output(lightPins, 0)
                sleep(0.2)
                GPIO.output(lightPins, 1)
                sleep(0.2)
                GPIO.output(lightPins, 0)
                sleep(0.2)
                GPIO.output(lightPins, 1)
                x, y, w, h = result
                # Crop and resize image to face.
                crop = face.resize(face.crop(image, x, y, w, h))
                # Test face against model.
                label, confidence = model.predict(crop)
                print 'Predicted {0} face with confidence {1} (lower is more confident).'.format(
                      'POSITIVE' if label == config.POSITIVE_LABEL else 'NEGATIVE', 
                      confidence)
                if label == config.POSITIVE_LABEL and confidence < config.POSITIVE_THRESHOLD:
                  print 'Recognized face!'
                  self.playAudio( welcomeAudioFile )
                  self.controllable.acceptAuth()
                else:
                  print 'Did not recognize face!'
                  self.playAudio( intruderAudioFile )	
                  self.controllable.rejectAuth()		

        elif observable.currentState.authState != AuthStates.Assess:
            GPIO.output(lightPins, 0)
        if  observable.currentState.alarmState == AlarmStates.Disarmed:
            if observable.currentState.authState == AuthStates.Accept:
                self.flasher.flash(self.greenOnly, self.greenOnly, self.ledsOff, self.greenOnly, 3)
            else:
                self.flasher.flash(self.greenOnly, self.greenOnly, self.greenOnly, self.greenOnly, 3) 
        elif observable.currentState.alarmState == AlarmStates.Armed:
            if observable.currentState.authState == AuthStates.Reject:
                self.flasher.flash(self.redOnly, self.ledsOff, self.ledsOff, self.redOnly, 3)
            elif    observable.currentState.authState == AuthStates.Fail:
                self.flasher.flash(self.redOnly, self.greenOn, self.greenOnly, self.redOnly, 3)
            elif    observable.currentState.authState == AuthStates.NoAuth:
                self.flasher.flash(self.redOnly, self.greenOn, self.greenOnly, self.redOnly, 1)
            
        
        if  observable.currentState.alarmState == AlarmStates.Fire:
            self.fireOn()
        else:
            self.fireOff()
Example #19
    def run(self):

        debut = time.time()
        self.ecrireStatus("INCONNU")
        reconnu = False

        # Load training data into model
        print 'Loading training data...'
        model = cv2.createEigenFaceRecognizer()
        model.load(config.TRAINING_FILE)
        print 'Training data loaded!'

        # Initialize camera.
        camera = cv2.VideoCapture(0)

        print 'Press Ctrl-C to quit.'

        while True:
            if time.time() - debut > 3:
                print reconnu
                reconnu = False
                ecrireStatus("INCONNU")
                print "3 secondes passees sans reconnaissance"

            ret, frame = camera.read()

            # Convert image to grayscale.
            image = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)

            # Get coordinates of single face in captured image.
            result = face.detect_single(image)
            if result is None:
                os.system('cls' if os.name == 'nt' else 'clear')
                print 'NO FACE'
                continue

            # draw the rectangle around the detected face
            for (x, y, w, h) in result:
                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
            #Display the resulting frame
            #cv2.imshow('Video', frame)

            nx = round(w * MagicMirror.coefW) + x
            ny = round(h * MagicMirror.coefH) + y
            nw = w - round(w * 2 * MagicMirror.coefW)
            nh = h - round(h * 2 * MagicMirror.coefH)
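            # nx, ny, nw, nh inset the detected box by a coefW/coefH fraction on each side for a tighter crop.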

            cv2.imshow("Adding faces to traning set...", frame[ny:ny + nh,
                                                               nx:nx + nw])
            cv2.waitKey(1)

            x, y, w, h = result[0]
            # Crop and resize image to face.
            crop = face.resize(face.crop(image, nx, ny, nw, nh))

            # Test face against model.
            label, confidence = model.predict(crop)

            if label == config.POSITIVE_LABEL and confidence < MagicMirror.confidenceTolerate:
                os.system('cls' if os.name == 'nt' else 'clear')
                print 'YES'
                print confidence
                print x, " ", nx, " "
                print y, " ", ny, " "
                print w, " ", nw, " "
                print h, " ", nh, " "

                if not reconnu:
                    print 'YES'
                    print "status change"
                    self.ecrireStatus("Imade")
                    reconnu = True

                debut = time.time()

            else:
                os.system('cls' if os.name == 'nt' else 'clear')
                print 'NO'
                print confidence
                print x, " ", nx, " "
                print y, " ", ny, " "
                print w, " ", nw, " "
                print h, " ", nh, " "
Example #20
def LBPHupdate(ID):
	labels=[]
	images=[]
	# make sure this is the right file name
	faceCascade = cv2.CascadeClassifier(cascadePath)
	
	counter=0
	#counter2=0
	foldername=ID;
	if not os.path.exists(foldername):
	    os.makedirs(foldername)

	name=foldername+"/Images"
	camera=PiCamera()
	camera.resolution=(320,240)
	camera.framerate=32
	rawCapture=PiRGBArray(camera,size=(320,240))
	time.sleep(3)

	cv2.namedWindow("Capturing new images")
	camera.capture(rawCapture,format="bgr",use_video_port=True)
	while rawCapture is not None and counter<30:
		image=rawCapture.array
		gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
		result=face.detect_single(gray)
		cv2.imshow("Capturing new images",image)
		if result is None:
			flag=0
			print "could not detect single face. Please retry."
		else:
			x,y,w,h=result
			flag=1
			cv2.rectangle(image, (x, y), (x+w, y+h), (0, 255, 0), 2)
			scaled_byRatio=face.crop(gray,x,y,w,h)
			resized=face.resize(scaled_byRatio)
			print "Saved captured image No."+str(counter)
			counter=counter+1
			filename = name + str(counter) + ".pgm"
			cv2.imwrite(filename,resized)
	        
		rawCapture.truncate(0)
		camera.capture(rawCapture,format="bgr",use_video_port=True)
		key=cv2.waitKey(1)       

	    	
	camera.close()
	cv2.destroyWindow("Capturing new images")


	#update database
	print 'Loading training data...'
	model=cv2.createLBPHFaceRecognizer()
	model.load(TRAINING_FILE)
	print 'Training data loaded!'

	f=open(CSV_FILE,'r+')
	t=open(LOOKUP_FILE,'r+')
	en=open(ENROLLMENT_FILE,'r+')
	#Get label
	f.seek(-10,2)
	s=f.readline()
	#print s
	list=s.split(';')
	num=str(list[1]).split('\n')
	#new label no.
	label=int(num[0])+1
	#print label

	f.seek(0,2)
	t.seek(0,2)
	en.seek(0,2)

	faces=[]
	labels=[]

	DIRECTORY=foldername
	#print DIRECTORY

	SEPARATOR=";"

	for files in os.listdir(DIRECTORY):
	    abs_path="%s\%s"%(DIRECTORY,files)
	    seq=''.join([str(abs_path),str(SEPARATOR),str(label),'\n'])
	    f.write(seq)
	    
	t.write(''.join([str(DIRECTORY),';',abs_path,';\n']));

	en.write(''.join([str(label),'\n']))

	f.close()
	t.close()
	en.close()

	for filename in walk_files(DIRECTORY,'*.pgm'):
	    #print filename
	    faces.append(prepare_image(filename))
	    labels.append(label)

	model.update(np.asarray(faces), np.asarray(labels))
	#print model

	#Save model results
	model.save(TRAINING_FILE)
	print 'Training data saved to',TRAINING_FILE

	print "successfully updated"
Example #21
def prepare_image(filename):
	"""Read an image as grayscale and resize it to the appropriate size for
	training the face recognition model.
	"""
	return face.resize(cv2.imread(filename, cv2.IMREAD_GRAYSCALE))
def Authenticate():
    #load lookup table
    tableName = LOOKUP_FILE
    table = []
    samples = []
    load_table(tableName, table, samples)

    # Create window
    cv2.namedWindow("Preview")
    #cv2.namedWindow("Compared")

    # Load training data into model
    print 'Loading training data...'
    model = cv2.createLBPHFaceRecognizer()
    model.load(TRAINING_FILE)
    print 'Training data loaded!'

    confidences = []
    labels = []

    vc = cv2.VideoCapture(1)
    print 'Looking for face...'
    if vc.isOpened():  # try to get the first frame
        rval, frame = vc.read()
        print "opened"
        while rval == False:
            rval, frame = vc.read()
    else:
        print "error"
        rval = False

    count = 30
    recognition = 0
    while rval:
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        result = face.detect_single(gray)
        cv2.imshow("Preview", frame)
        key = cv2.waitKey(1)
        if result is None:
            print "Please face to the camera "
        else:
            x, y, w, h = result
            # Crop and resize image to face
            crop = face.resize(face.crop(gray, x, y, w, h))
            label, confidence = model.predict(crop)
            confidences.append(confidence)
            labels.append(label)
            cv2.waitKey(1)
            count -= 1
        if count <= 0:
            break
        rval, frame = vc.read()
    print "finish capturing faces"
    vc.release()
    cv2.destroyWindow("Preview")

    temp = []
    i = 0
    length = len(labels)
    while length > 0:
        if i == 0:
            temp.append(labels[length - 1])
            i += 1
            length -= 1
        else:
            tempi = 0
            while tempi < i:
                if labels[length - 1] != temp[tempi]:
                    tempi += 1
                else:
                    length -= 1
                    break
            if tempi == i:
                temp.append(labels[length - 1])
                i += 1
            length -= 1

    #print labels
    #print temp
    #print i

    tempi = 0
    numoflabel = 0
    if i > 5:
        print "could not enter"
        return 0, -1
    else:
        element = temp[tempi]
        while tempi < i:
            tempj = 0
            count = 0
            while tempj < len(labels):
                if labels[tempj] == temp[tempi]:
                    count += 1
                tempj += 1
            if count > numoflabel:
                numoflabel = count
                element = temp[tempi]
            tempi += 1
        print "element is {}, numoflabel is {}".format(element, numoflabel)

    tempi = 0
    con = 0
    while tempi < len(labels):
        if labels[tempi] == element:
            con = con + confidences[tempi]
        tempi += 1
    ave = con / numoflabel

    print "mean of confidences is {}".format(ave)
    #print confidences

    # Check whether the winning label appears in the enrollment file.
    f = open(ENROLLMENT_FILE, 'r')
    s = f.readline()
    flag = 0
    while s != "":
        index = int(s)
        #print index
        if index == element:
            flag = 1
            print "flag TRUE"
            break
        s = f.readline()
    if ave < 50 and flag == 1:
        print "authenticated"
        return 1, element
    else:
        print "could not enter"
        return 0, -1
def LBPHupdate(ID):
    labels = []
    images = []
    # make sure this is the right file name
    faceCascade = cv2.CascadeClassifier(cascadePath)

    counter = 0
    #counter2=0
    foldername = ID
    if not os.path.exists(foldername):
        os.makedirs(foldername)

    name = foldername + "/Images"
    camera = PiCamera()
    camera.resolution = (320, 240)
    camera.framerate = 32
    rawCapture = PiRGBArray(camera, size=(320, 240))
    time.sleep(3)

    cv2.namedWindow("Capturing new images")
    camera.capture(rawCapture, format="bgr", use_video_port=True)
    while rawCapture is not None and counter < 30:
        image = rawCapture.array
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        result = face.detect_single(gray)
        cv2.imshow("Capturing new images", image)
        if result is None:
            flag = 0
            print "could not detect single face. Please retry."
        else:
            x, y, w, h = result
            flag = 1
            cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
            scaled_byRatio = face.crop(gray, x, y, w, h)
            resized = face.resize(scaled_byRatio)
            print "Saved captured image No." + str(counter)
            counter = counter + 1
            filename = name + str(counter) + ".pgm"
            cv2.imwrite(filename, resized)

        rawCapture.truncate(0)
        camera.capture(rawCapture, format="bgr", use_video_port=True)
        key = cv2.waitKey(1)

    camera.close()
    cv2.destroyWindow("Capturing new images")

    #update database
    print 'Loading training data...'
    model = cv2.createLBPHFaceRecognizer()
    model.load(TRAINING_FILE)
    print 'Training data loaded!'

    f = open(CSV_FILE, 'r+')
    t = open(LOOKUP_FILE, 'r+')
    en = open(ENROLLMENT_FILE, 'r+')
    #Get label
    f.seek(-10, 2)
    s = f.readline()
    #print s
    list = s.split(';')
    num = str(list[1]).split('\n')
    #new label no.
    label = int(num[0]) + 1
    #print label

    f.seek(0, 2)
    t.seek(0, 2)
    en.seek(0, 2)

    faces = []
    labels = []

    DIRECTORY = foldername
    #print DIRECTORY

    SEPARATOR = ";"

    for files in os.listdir(DIRECTORY):
        abs_path = "%s\%s" % (DIRECTORY, files)
        seq = ''.join([str(abs_path), str(SEPARATOR), str(label), '\n'])
        f.write(seq)

    t.write(''.join([str(DIRECTORY), ';', abs_path, ';\n']))

    en.write(''.join([str(label), '\n']))

    f.close()
    t.close()
    en.close()

    for filename in walk_files(DIRECTORY, '*.pgm'):
        #print filename
        faces.append(prepare_image(filename))
        labels.append(label)

    model.update(np.asarray(faces), np.asarray(labels))
    #print model

    #Save model results
    model.save(TRAINING_FILE)
    print 'Training data saved to', TRAINING_FILE

    print "successfully updated"
Example #24
import fnmatch
import os

import cv2

import config
import face


def walk_files(directory, match="*"):
    for root, dirs, files in os.walk(directory):
        for filename in fnmatch.filter(files, match):
            yield os.path.join(root, filename)


for filename in walk_files(config.CAPTURE_DIR, "*.jpg"):

    img = cv2.imread(filename)
    bw_img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    result = face.detect_single(bw_img)

    if result is None:
        print "No face detected ... " + filename
    else:
        x, y, w, h = result
        crop = face.resize(face.crop(img, x, y, w, h))
        cv2.imwrite(filename, crop)
        print "Cropped file ... ", filename
    def Authenticate(cls):
        #load lookup table
        tableName = LOOKUP_FILE
        table = []
        samples = []
        #self.load_table(tableName,table,samples)

        # Create window
        cv2.namedWindow("Preview")
        #cv2.namedWindow("Compared")

        # Load training data into model
        print 'Loading training data...'
        model = cv2.createLBPHFaceRecognizer()
        model.load(TRAINING_FILE)
        print 'Training data loaded!'

        confidences = []
        labels = []

        camera = PiCamera()
        camera.resolution = (320, 240)
        camera.framerate = 32
        rawCapture = PiRGBArray(camera, size=(320, 240))
        time.sleep(3)

        count = 30
        recognition = 0

        print 'Looking for face...'
        camera.capture(rawCapture, format="bgr", use_video_port=True)
        while rawCapture is not None:
            image = rawCapture.array
            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            result = face.detect_single(gray)
            cv2.imshow("Preview", image)
            key = cv2.waitKey(1)
            if result is None:
                print "Please face to the camera "
            else:
                x, y, w, h = result
                # Crop and resize image to face
                crop = face.resize(face.crop(gray, x, y, w, h))
                label, confidence = model.predict(crop)
                confidences.append(confidence)
                labels.append(label)
                cv2.waitKey(1)
                count -= 1
            if count <= 0:
                break
            rawCapture.truncate(0)
            camera.capture(rawCapture, format="bgr", use_video_port=True)

        print "finish capturing faces"
        camera.close()
        cv2.destroyWindow("Preview")

        temp = []
        i = 0
        length = len(labels)
        while length > 0:
            if i == 0:
                temp.append(labels[length - 1])
                i += 1
                length -= 1
            else:
                tempi = 0
                while tempi < i:
                    if labels[length - 1] != temp[tempi]:
                        tempi += 1
                    else:
                        length -= 1
                        break
                if tempi == i:
                    temp.append(labels[length - 1])
                    i += 1
                length -= 1

        print "------LABELS:{}".format(labels)
        print "------DIFFERENT LABELS:{}".format(temp)
        print "------NUMBER OF DIFFERENT LABELS:{}".format(i)

        tempi = 0
        numoflabel = 0
        if i > 5:
            print "could not enter"
            return 0, -1
        else:
            element = temp[tempi]
            while tempi < i:
                tempj = 0
                count = 0
                while tempj < len(labels):
                    if labels[tempj] == temp[tempi]:
                        count += 1
                    tempj += 1
                if count > numoflabel:
                    numoflabel = count
                    element = temp[tempi]
                tempi += 1
            print "element is {}, numoflabel is {}".format(element, numoflabel)

        tempi = 0
        con = 0
        while tempi < len(labels):
            if labels[tempi] == element:
                con = con + confidences[tempi]
            tempi += 1
        ave = con / numoflabel

        print "mean of confidences is {}".format(ave)
        #print confidences

        # Check whether the winning label appears in the enrollment file.
        f = open(ENROLLMENT_FILE, 'r')
        s = f.readline()
        flag = 0
        while s != "":
            index = int(s)
            #print index
            if index == element:
                flag = 1
                print "flag TRUE"
                break
            s = f.readline()

        if ave < 52 and flag == 1:
            print "authenticated"
            return 1, element
        else:
            print "could not enter"
            return 0, -1
        # TODO: Check if button is pressed.
        # if is_letter_input('c'):
        # Check for the positive face and unlock if found.
        image = camera.read()
        # Convert image to grayscale.
        image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
        # Get coordinates of single face in captured image.
        result = face.detect_single(image)
        if result is None:
            print "No Face"
            time.sleep(10)
            personPresent = 0
            continue
        x, y, w, h = result
        # Crop and resize image to face.
        crop = face.resize(face.crop(image, x, y, w, h))
        # Test face against model.
        label, confidence = model.predict(crop)
        print "Predicted {0} face with confidence {1} (lower is more confident).".format(
            "POSITIVE" if label == config.POSITIVE_LABEL else "NEGATIVE", confidence
        )
        if label == config.POSITIVE_LABEL and confidence < config.POSITIVE_THRESHOLD and personPresent == 0:
            print "Recognized face!"
            personPresent = 1
            with open("/home/ubuntu/chatbotio", "w") as file:
                file.write("Greetings Human")
            time.sleep(10)
        else:
            print "Did not recognize face"
            time.sleep(10)
	def LBPHupdate(cls,ID):
		labels=[]
		images=[]
		# make sure this is the right file name
		faceCascade = cv2.CascadeClassifier(cascadePath)

		cv2.namedWindow("preview")

		vc = cv2.VideoCapture(0) # device ID may not be 0

		counter=0
		#counter2=0
		foldername=ID;
		if not os.path.exists(foldername):
			os.makedirs(foldername)

		name=foldername+"/Images"


		if vc.isOpened(): # try to get the first frame
			rval, frame = vc.read()
			print "opened"
			while rval==False:
				rval,frame=vc.read()
		else:
			print "error"
			rval = False
		# if vc.isOpened(): # try to get the first frame
		#     rval, frame = vc.read()
		#     print "opened"
		# else:
		#     print "error"
		#     rval = False

		while rval and counter<30:
			gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
			#faces =faceCascade.detectMultiScale(gray)
			result=face.detect_single(gray)
			cv2.imshow("preview",frame)
			#for (x, y, w, h) in face:
			if result is None:
				flag=0
				print "could not detect single face. Please retry."
			else:
				x,y,w,h=result
				#print "hello"
				flag=1
				cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
				scaled_byRatio=face.crop(gray,x,y,w,h)
				resized=face.resize(scaled_byRatio)
				#resized=cv2.resize(gray[y:y+h,x:x+w],(200,200),interpolation=cv2.INTER_CUBIC)
				# keyword "interpolation" should not be left out
				cv2.imshow("preview", frame)
				#print "Ready for capture"
				print "Saved captured image No."+str(counter)
				counter=counter+1
				filename = name + str(counter) + ".pgm"
				cv2.imwrite(filename,resized)

			rval, frame = vc.read()
			key=cv2.waitKey(1)


		vc.release()
		cv2.destroyWindow("preview")


		#update database
		print 'Loading training data...'
		model=cv2.createLBPHFaceRecognizer()
		model.load(TRAINING_FILE)
		print 'Training data loaded!'

		f=open(CSV_FILE,'r+')
		t=open(LOOKUP_FILE,'r+')
		en=open(ENROLLMENT_FILE,'r+')
		#Get label
		f.seek(-10,2)
		s=f.readline()
		#print s
		list=s.split(';')
		num=str(list[1]).split('\n')
		#new label no.
		label=int(num[0])+1
		#print label

		f.seek(0,2)
		t.seek(0,2)
		en.seek(0,2)

		faces=[]
		labels=[]

		DIRECTORY=foldername
		#print DIRECTORY

		SEPARATOR=";"

		for files in os.listdir(DIRECTORY):
			abs_path="%s\%s"%(DIRECTORY,files)
			seq=''.join([str(abs_path),str(SEPARATOR),str(label),'\n'])
			f.write(seq)

		t.write(''.join([str(DIRECTORY),';',abs_path,';\n']));

		en.write(''.join([str(label),'\n']))

		f.close()
		t.close()
		en.close()

		for filename in cls.walk_files(DIRECTORY,'*.pgm'):
			#print filename
			faces.append(cls.prepare_image(filename))
			labels.append(label)

		model.update(np.asarray(faces), np.asarray(labels))
		#print model

		#Save model results
		model.save(TRAINING_FILE)
		print 'Training data saved to',TRAINING_FILE

		print "successfully updated"

		shutil.rmtree(foldername)
		return label