Example #1
    def _worker_init_face_recognizer(self):
        starttime = time.time()
        face_recognizer = cv2.createLBPHFaceRecognizer()
        faces_dir = self.get_faces_rootfolder()
        logging.info("OpenCV._worker_init_face_recognizer() loading face-images from %s..." % (faces_dir,))

        face_images = []
        self._face_label2name = {-1: "*none*"}
        face_labels = []
        for face_label, (dirpath, dirnames, filenames) in enumerate(os.walk(faces_dir)):
            name = os.path.basename(dirpath)
            for filename in filenames:
                if not filename.startswith(".") and ("_raw" in filename):
                    # The coordinates of the eyes are encoded in the image filename
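                    # e.g. a hypothetical name "1436284731_120_85_190_88_raw.png"
                    # -> timestamp 1436284731, left eye at (120, 85), right eye at (190, 88)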
                    timestamp, eye_left_x, eye_left_y, eye_right_x, eye_right_y = filename.split("_")[:5]
                    filepath = os.path.join(dirpath, filename)

                    normalized_face_filename = os.path.join(dirpath, "%s_normalized.tiff" % (timestamp,))
                    if os.path.exists(normalized_face_filename):
                        normalized_face = cv2.imread(normalized_face_filename, cv2.CV_LOAD_IMAGE_GRAYSCALE)
                        is_high_quality_image = True
                    else:
                        logging.info(
                            "No normalized version found for the raw image, so I'll create it now (raw image:%s)"
                            % (filepath,)
                        )

                        original_image = cv2.imread(filepath)
                        normalized_face, is_high_quality_image = self.normalize_faceimage(
                            original_image,
                            eye_left=(int(eye_left_x), int(eye_left_y)),
                            eye_right=(int(eye_right_x), int(eye_right_y)),
                        )
                        if is_high_quality_image:
                            cv2.imwrite(normalized_face_filename, normalized_face)
                        else:
                            logging.warn(
                                "OpenCV._worker_init_face_recognizer() didn't find a face in the file '%s'!"
                                % (filepath,)
                            )
                            # os.remove(filepath)

                    if is_high_quality_image:
                        face_images.append(normalized_face)
                        face_labels.append(face_label)
                        self._face_label2name[face_label] = name

        face_labels = numpy.asarray(face_labels, dtype=numpy.int32)
        face_images = numpy.asarray(face_images)

        if len(face_images) > 0:
            self.face_recognizer = cv2.createLBPHFaceRecognizer()
            self.face_recognizer.train(face_images, face_labels)
        else:
            self.face_recognizer = None
        elapsedtime = time.time() - starttime
        logging.info(
            "OpenCV._worker_init_face_recognizer() loaded %d images for %d labels in %.2f seconds"
            % (len(face_images), len(self._face_label2name) - 1, elapsedtime)
        )
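
Once trained, the recognizer above is queried one face crop at a time. A minimal stand-alone sketch against the same OpenCV 2.4 API (the file names are illustrative assumptions, not part of the example):

import cv2

recognizer = cv2.createLBPHFaceRecognizer()
recognizer.load("recognizer.yml")  # hypothetical model file saved earlier
# hypothetical normalized grayscale crop, preprocessed like the training images
face = cv2.imread("face.tiff", cv2.CV_LOAD_IMAGE_GRAYSCALE)
label, confidence = recognizer.predict(face)  # lower confidence means a closer match
print "label=%d confidence=%.2f" % (label, confidence)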
Example #2
 def __init__(self):
     self.X = []
     self.y = []
     self.path = "Faces"
     self.recognizerPath = "LBPHFaceRecognizers/recognizer.re"
     self.speechEngine = pyttsx.init()
     try:
         self.recognizer = cv2.createLBPHFaceRecognizer()
         self.recognizer.load(self.recognizerPath)
     except:
         self.recognizer = cv2.createLBPHFaceRecognizer()
Example #3
 def __init__(self):
     self.CascadePath = "assets/haarcascades/haarcascade_frontalface_alt.xml"
     self.ClientPath = "db/PredictionDatabase.csv"
     self.GenderPath = "db/GenderDatabase.csv"
     self.TrainingDBPath = "db/TrainingDatabase.csv"
     self.XML = "assets/face_trained.xml"
     self.genderXML = "assets/gender_trained.xml"
     self.ClientImage = ""
     self.FaceCascade = cv2.CascadeClassifier(self.CascadePath)
     self.Recognizer = cv2.createLBPHFaceRecognizer()
     self.GenderRecognizer = cv2.createLBPHFaceRecognizer()
     self.UserName = "******"
     self.FLAG = 0  # Utility flag
Example #4
 def train(self,gallery):
     for face in gallery:
         img = [self.imgfun(face)]
         label = np.array([face.person])
         recognizer = cv2.createLBPHFaceRecognizer()
         recognizer.train(img,label)
         self.recognizers.append(recognizer)
Example #5
def train_recognizers(base64_path, models_path, sample_size=10):
    if not os.path.exists(models_path):
        os.mkdir(models_path)
    # faces = paths.list_files(base64_path, validExts=('.txt',))
    # loop over the input faces for training
    for (i, path) in enumerate(glob.glob(base64_path + "/*.txt")):
        fr = FaceRecognizer(cv2.createLBPHFaceRecognizer(radius=1, neighbors=8, grid_x=8, grid_y=8))
        # labels = []

        # extract the person from the file name,
        name = path[path.rfind("/") + 1:].replace(".txt", "")
        print("[INFO] training on '{}'".format(name))

        # load the faces file, sample it, and initialize the list of faces
        sample = open(path).read().strip().split("\n")
        sample = random.sample(sample, min(len(sample), sample_size))
        faces = []

        # loop over the faces in the sample
        for face in sample:
            # decode the face and update the list of faces
            faces.append(encodings.base64_decode_image(face))

        # train the face detector on the faces and update the list of labels
        fr.train(faces, np.array([i] * len(faces)))
        # labels = name

        # update the face recognizer to include the face name labels, then write the model to file
        fr.setLabels((name,))
        model_path = os.path.join(models_path, name + '.model')
        fr.save(model_path)
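
A hypothetical invocation of the trainer above (directory names are assumptions; each .txt file holds base64-encoded face crops for one person):

train_recognizers("selfies", "models", sample_size=10)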
Example #6
def run_training(training_data, identity, frames):
    # For face detection we will use the Haar Cascade provided by OpenCV.
    cascadePath = "haarcascade_frontalface_default.xml"
    faceCascade = cv2.CascadeClassifier(cascadePath)

    # For face recognition we will use the LBPH Face Recognizer
    recognizer = cv2.createLBPHFaceRecognizer()
    try:
        recognizer.load(training_data)
    except Exception:
        pass

    images, labels = [], []
    for frame in frames:
        # Convert the image format into numpy array
        image = np.array(frame, 'uint8')

        for (x, y, w, h) in faceCascade.detectMultiScale(image):
            images.append(image[y: y + h, x: x + w])
            labels.append(int(identity))
            cv2.waitKey(1)

    print len(images)
    cv2.destroyAllWindows()


    # Perform the training
    recognizer.update(images, np.array(labels))
    recognizer.save(training_data)
Example #7
 def cv_init(self):
     self.bridge = CvBridge()
     cascade_eye = "./model/haarcascades/haarcascade_eye.xml"
     cascade_1 = rospy.get_param(
         "~cascade_1", "./model/haarcascades/haarcascade_frontalface_alt2.xml")
     cascade_2 = rospy.get_param(
         "~cascade_2", "./model/haarcascades/haarcascade_frontalface_alt.xml")
     cascade_3 = rospy.get_param(
         "~cascade_3", "./model/haarcascades/haarcascade_frontalface_profileface.xml")
     self.cascade_1 = cv2.CascadeClassifier(cascade_1)
     self.cascade_2 = cv2.CascadeClassifier(cascade_2)
     self.cascade_3 = cv2.CascadeClassifier(cascade_3)
     self.cascade_eye = cv2.CascadeClassifier(cascade_eye)
     self.haar_scaleFactor = rospy.get_param("~haar_scaleFactor", 1.3)
     self.haar_minNeighbors = rospy.get_param("~haar_minNeighbors", 4)
     self.haar_minSize = rospy.get_param("~haar_minSize", 30)
     #self.haar_maxSize = rospy.get_param("~haar_maxSize", 150)
     self.haar_params = dict(scaleFactor=self.haar_scaleFactor,
                             minNeighbors=self.haar_minNeighbors,
                             flags=cv.CV_HAAR_DO_CANNY_PRUNING,
                             minSize=(self.haar_minSize, self.haar_minSize)
                             )
     self.colors = dict(green=(0, 255, 0), red=(255, 0, 0))
     self.recognizer = cv2.createLBPHFaceRecognizer()
     self.imgPlayer = imtools.ImagePlayer(title='face detection')
     self.save_folder = './tmp'
     self.faces = []
     self.rects = []
     self.name_id = {}
     self.name_rect = {}
     self.target = 'abner'
     self.frames = []
     self.headers = []
     self.frame_max = 30
Example #8
def predict_image(face):
    global variable_faces
    num_iterations = 300
    matches_this_iteration = 0
    for j in xrange(num_iterations):
        try:
            listlabel=[]
            recognizers = []
            variable_faces = []
            num_recognizers = 3
            for j in xrange(num_recognizers):
                lbh_recognizer = cv2.createLBPHFaceRecognizer()
                recognizers.append(lbh_recognizer)
            recognizers = train_recognizers(recognizers,listlabel)
            average_confidence = 0
            CONFIDENCE_THRESHOLD = 100.0
            labels = []
            for lbh_recognizer in recognizers:
                [label, confidence] = lbh_recognizer.predict(np.asarray(face))
                average_confidence += confidence
                labels.append(label)
            average_confidence /= num_recognizers
            if len(set(labels))==1 and average_confidence < CONFIDENCE_THRESHOLD:
                    matches_this_iteration += 1
                    if matches_this_iteration == 3:
                        return (directory_name_to_display_name(findlabel(listlabel,labels[0])))
        except:
            continue
    return None
Example #9
    def test_recognize_model_internal_models(self):

        base_path = os.path.join('..', 'test_files', 'face_models')

        face_rec_data = os.path.abspath(
            os.path.join(base_path, 'face_rec_data'))

        params = {c.GLOBAL_FACE_MODELS_MIN_DIFF_KEY: -1,
                  c.GLOBAL_FACE_REC_DATA_DIR_PATH_KEY: face_rec_data}

        self.test_add_face()

        fm = FaceModels(params)

        fm.load_enabled_models()

        model_id = 0
        im_list = [os.path.join(base_path, '0000000_aligned.png'),
                   os.path.join(base_path, '0000005_aligned.png')]
        model_path_0 = fm.create_model_from_image_list(im_list, model_id)

        query_model = cv2.createLBPHFaceRecognizer()
        query_model.load(model_path_0)

        rec_results = fm.recognize_model(query_model)

        print('rec_results', rec_results)

        rec_result = rec_results[0]

        self.assertEqual(rec_result[c.ASSIGNED_TAG_KEY], 3812)
Example #10
def get_Faces_Labels_AndTrain(path, delay):
    recognizer = cv2.createLBPHFaceRecognizer()
    print "\nMade the LBPH(LocalBinaryPatternHistogram) recognizer "
    print "Adding  PATH=",path
    image_paths = [os.path.join(path, f) for f in os.listdir(path) if not f.endswith('.zzzzzzzzzzzzzz')]
    faces = []
    labels = []
    count = 0
    for image_path in image_paths:
        print " getting the image"
        image_pil = Image.open(image_path).convert('L')
        print "Conv. to GrayScale "
        image_pil = np.array( image_pil ,  'uint8' )
        face = faceCascade.detectMultiScale( image_pil )
        nbr = int(os.path.split(image_path)[1].split(".")[0].replace("subject", "")) 
        for (x, y, w, h) in face:

            count= count +1
            print "COUNT = " , count
            faces.append(image_pil [y: y + h, x: x + w])
            cv2.imshow("Adding faces to traning set...", image_pil[y: y + h, x: x + w])
            cv2.waitKey(delay)
            labels.append(nbr)
            print "recognizer updated... NUMBER OF FACES =", len(faces) , "Number of Lables =" , len(labels)
            recognizer.train(faces, np.array(labels))
            print "LABEL_NUMBER=", nbr
            if (count>3):
                nbr_predicted, conf = recognizer.predict( image_pil[y: y + h, x: x + w])
                print "PERCENT MATCH=",conf , "NUMBER PREDICTED = " , nbr_predicted
    cv2.destroyAllWindows()        
    print "USED THE GET FACES FUNCTION"
    return recognizer
Example #11
	def consume(self, frame, ROIs, ROIs_coordinates, faces):
		if len(ROIs) == 1:
			ROI_x = ROIs_coordinates[0][0]
			ROI_y = ROIs_coordinates[0][1]
			ROI_w = ROIs[0].shape[0]
			ROI_h = ROIs[0].shape[1]
			cv2.rectangle(frame, (ROI_x, ROI_y), (ROI_x+ROI_w, ROI_y+ROI_h), (0, 0, 255))

		if not cv2.waitKey(1) & 0xFF == ord('p'):
			return frame

		if self.image_count == self.IMAGE_COUNT:
			if self.ALREADY_TRAINED:
				return

			print str(self.image_count) + " images were captured. Training recognizer ..."
			images, labels = self.get_images_and_labels(self.FACE_PATH)
			recognizer = cv2.createLBPHFaceRecognizer()
			self.train_recognizer(recognizer, images, labels)
			self.EXIT_FLAG = True
			return

		if len(faces) > 1:
			print "Detected more than one face. Skipping frame ..."
			return None

		if len(faces) == 0:
			print "No faces in the image. Skipping frame ..."
			return None

		self.image_count += 1
		cv2.imwrite(self.FACE_PATH + "/" + str(self.image_count) + self.FRAMECAPTURE_EXTENSION, ROIs[0])
		print "Saved image!"

		return frame
Example #12
def refresh_data():
    # Get path to all images
    path = '../rsc/images'
    image_paths = [os.path.join(path, f) for f in os.listdir(path)]

    # Initialize empty lists for these images and labels
    images = []
    labels = []

    for path in image_paths:
        images.append(cv2.imread(path, cv2.CV_LOAD_IMAGE_GRAYSCALE))
        labels.append(int(os.path.split(path)[1].split("_")[0]))

    labels = array(labels)

    # Create recognizers
    eigen_recognizer = cv2.createEigenFaceRecognizer()
    fisher_recognizer = cv2.createFisherFaceRecognizer()
    lbhp_recognizer = cv2.createLBPHFaceRecognizer()

    # Train recognizers
    eigen_recognizer.train(images, labels)
    fisher_recognizer.train(images, labels)
    lbhp_recognizer.train(images, labels)

    # Save results
    eigen_recognizer.save(EIGEN_RECOGNIZER_PATH)
    fisher_recognizer.save(FISHER_RECOGNIZER_PATH)
    lbhp_recognizer.save(LBHP_RECOGNIZER_PATH)
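
Loading a saved model back is symmetric; a minimal sketch, assuming the same LBHP_RECOGNIZER_PATH constant used above:

lbhp_recognizer = cv2.createLBPHFaceRecognizer()
lbhp_recognizer.load(LBHP_RECOGNIZER_PATH)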
Example #13
def rePath(imgpath,xml="",csv='csv.txt'):
    model = cv2.createLBPHFaceRecognizer()
    model.load(xml+"face-lbph.yml")
    try:
        im = cv2.imread(imgpath, cv2.IMREAD_GRAYSCALE)
    except IOError, (errno, strerror):
        print "I/O error({0}): {1}".format(errno, strerror)
Example #14
def trainImage(cur):
	#first need to train sample images in database
	recognizer = cv2.createLBPHFaceRecognizer()
	#get training image path from database and train them
	sql = "SELECT * FROM users"
	cur.execute(sql)
	
	images = []
	labels = []
	for row in cur.fetchall():
		#need to modify the argument
		label = row[0]
		
		image_pil = Image.open(row[4]).convert('L')
		image = np.array(image_pil, 'uint8')
		#faces = cascade.detectMultiScale(image, 1.3, 5, minSize = (70, 70), flags = cv2.cv.CV_HAAR_SCALE_IMAGE)
		faces = cascade.detectMultiScale(image)
		# If face is detected, append the face to images and the label to labels
		for (x, y, w, h) in faces:
			cv2.imshow("Adding faces to traning set...", image[y: y + h, x: x + w])
			images.append(image[y: y + h, x: x + w])
			labels.append(row[0])
	
	
	recognizer.train(images, np.array(labels))
	return recognizer
Example #15
    def __init__(self,useGUI=False,debugWin=True,
                 drawFaceRct=True,
                 faceRecognize=True,faceRecognizeXmlfile=None):
        self._useGUI=useGUI
        self._debugWin=debugWin
        self.drawFaceRct=drawFaceRct
        #self.drawMoveRct=drawMoveRct
        self.faceRecognize = faceRecognize
        self._recognizeFace_xml_file=faceRecognizeXmlfile

        self._front=IOSteam()
        self._detectFace=FaceTracker()
        self._recognizeFace=cv2.createLBPHFaceRecognizer(neighbors=8)
        self.displayFrame=None
        try:
            self._recognizeFace.load(self._recognizeFace_xml_file)

        except:
            logging.debug('[-] recognizer DB xml file {} failed to load'.format(
                                                self._recognizeFace_xml_file))
            logging.info('[-] without a DB xml file, recognition may raise errors!')
        #self._trackers=[]
        self._faceLost=False
        #self._faceReg_api=API(API_KEY, API_SECRET)
        self._res=[]
        self._mode=self.Available_Mode[0]
        
        
        self.faceSetinSameFrame=[]
        self.faceSetofImageinSameFrame=[]
        self.faceSetofImageinSameFrameResized=[]
Example #16
def main():
  args = configureArguments()
  configureLogging(args.log)

  #faceSize = (100, 100)
  faceSize = None

  logging.debug('Creating face recognizer...')
  #fr = cv2.createFisherFaceRecognizer()
  fr = cv2.createLBPHFaceRecognizer()

  #modelFile = '/home/juan/ciberpunks/faces/lpbFaceModel'
  #if os.path.isfile(modelFile):
  #  fr.load(modelFile)
  #  logging.info('Loaded saved model state.')
  #else:
  trainPaths = [
    '/home/juan/ciberpunks/faces/at&t_database', 
    '/home/juan/ciberpunks/faces/lfw2', 
    '/home/juan/ciberpunks/faces/prestico']

  [images, labels, subjects] = readImages(trainPaths, faceSize)

  logging.debug('Training face recognizer...')
  fr.train(images, labels)
  #fr.save(modelFile)
  #logging.info('Saved a trained model state.')

  logging.debug('Starting face recognition...')
  #recognizePictureCandidates(fr, subjects, faceSize)
  recognizeVideo(fr, args.videoFileName, subjects, args.haarFolder)
Example #17
def train(path='./yalefaces/subject02', name=None):
    """
    Takes in a path for a folder filled with training images for a specific subject.
    It trains a recognizer for the person using the images. It then saves the recognizer in an .xml
    file. For now, it saves all the recognizers in a folder called recognizers in the directory where
    the code is located.

        Args:
            path (str): Path name of folder containing images
            name (str): Name of the subject

        Returns:
            None. The trained recognizer is saved to ./recognizers/<name>.xml.


    """
    if name is None:
        name = path.split("/")[2]
    #Create Recognizer object
    recognizer = cv2.createLBPHFaceRecognizer()
    #retrieve all the images and labels associated with the training images
    images, labels = get_image_and_labels(path)
    #train recognizer with the images
    recognizer.train(images, np.array(labels))
    #save all recognizers in a subfolder
    recognizer.save("./recognizers/{}.xml".format(name))
Example #18
def intro():
    cascadePath="haarcascade_frontalface_default.xml"
    faceCascade=cv2.CascadeClassifier(cascadePath)
    recognizer=cv2.createLBPHFaceRecognizer()
    recognizer.load("rec.xml")
    urllib.urlretrieve("http://192.168.0.100:81/snapshot.cgi?user=admin&pwd=googlevirus","detected.jpg")
    predict_image_pil=Image.open("detected.jpg").convert('L')
    predict_image=np.array(predict_image_pil)
    faces = faceCascade.detectMultiScale(
				predict_image,
                scaleFactor=1.2,
                minNeighbors=5,
                minSize=(30, 30),
                flags = cv2.cv.CV_HAAR_SCALE_IMAGE
            )
    print faces
    d={"results":[]}
    for (x,y,w,h) in faces:
    	nbr_predicted,conf=recognizer.predict(cv2.resize(predict_image[y:y+h,x:x+w],(40,40),interpolation=cv2.INTER_CUBIC))
    	print nbr_predicted,conf
    	if conf<=50.0:
    		print "hi"
    		if nbr_predicted==1:
    			data={"name":"Rishav","message":"hi master"}
    			print data
    			d["results"].append(data)
    		elif nbr_predicted==2:
    			data={"name":"Kushagra","message":"hi bhole"}
    			d["results"].append(data)
    return json.dumps(d)
Example #19
def get_images_and_labels(path):
    # For face detection we will use the Haar Cascade provided by OpenCV.
    cascadePath = "./config/haarcascade_frontalface_default.xml"
    faceCascade = cv2.CascadeClassifier(cascadePath)

    # For face recognition we will use the LBPH Face Recognizer
    recognizer = cv2.createLBPHFaceRecognizer()
    # Append all the absolute image paths in a list image_paths
    # We will not read the image with the .sad extension in the training set
    # Rather, we will use them to test our accuracy of the training
    image_paths = [os.path.join(path, f) for f in os.listdir(path) if not f.endswith('.sad')]
    # images will contain the face images
    images = []
    # labels will contain the label assigned to each image
    labels = []
    for image_path in image_paths:
        # Read the image and convert to grayscale
        image_pil = Image.open(image_path).convert('L')
        # Convert the image format into numpy array
        image = np.array(image_pil, 'uint8')
        # Get the label of the image
        nbr = int(os.path.split(image_path)[1].split("_")[0])
        # Detect the face in the image
        faces = faceCascade.detectMultiScale(image)
        # If face is detected, append the face to images and the label to labels
        for (x, y, w, h) in faces:
            images.append(image[y: y + h, x: x + w])
            labels.append(nbr)
            #cv2.imshow("Adding faces to traning set...", image[y: y + h, x: x + w])
            #cv2.waitKey(50)
    # return the images list and labels list
    return images, labels
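
The .sad images held out above are meant for the accuracy check; a hedged sketch of that step (path is assumed to point at the same training folder, and the cascade and recognizer mirror the ones created inside the function):

cascadePath = "./config/haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascadePath)
recognizer = cv2.createLBPHFaceRecognizer()
images, labels = get_images_and_labels(path)
recognizer.train(images, np.array(labels))
# probe with the held-out .sad images
for f in [f for f in os.listdir(path) if f.endswith('.sad')]:
    probe = np.array(Image.open(os.path.join(path, f)).convert('L'), 'uint8')
    for (x, y, w, h) in faceCascade.detectMultiScale(probe):
        nbr_predicted, conf = recognizer.predict(probe[y: y + h, x: x + w])
        print "predicted %d with confidence %.2f" % (nbr_predicted, conf)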
Example #20
def learnCollectedFaces(preprocessedFaces, faceLabels, facerecAlgorithm, recognizer):
    print "Learning the collected faces using the {0} algorithm...".format(facerecAlgorithm)
    
    # Make sure the "contrib" module is dynamically loaded at runtime
    # Requires OpenCV v2.4.1 or later (from June 2012), otherwise the FaceRecognizer will not compile or run
#     haveContribModule =  
    
#     if not haveContribModule:
#         print "contrib module is needed for facerecognizer"
#         sys.exit()

    # create recognizer depending on the defined algorithm
    faceLabels = np.array(faceLabels)
    
    if facerecAlgorithm == 'Fisherfaces':
        recognizer = cv2.createFisherFaceRecognizer()
    elif facerecAlgorithm == 'Eigenfaces':
        recognizer = cv2.createEigenFaceRecognizer()
    else:
        recognizer = cv2.createLBPHFaceRecognizer()
    
#     faceLabels = np.array(faceLabels)
    print np.unique(faceLabels)
    recognizer.train(preprocessedFaces, faceLabels)
    return recognizer
Example #21
def check_trainer_accuracy():
    testRecognizer = cv2.createLBPHFaceRecognizer(neighbors=5)
    (train_images, train_labels) = get_images_and_labels(TRAINING_DIR)


    test_images = []
    test_labels = []

    for i in range(0, len(train_images)/10):
        index = random.randrange(len(train_images)-1)
        test_images.append(train_images[index])
        test_labels.append(train_labels[index])
        del train_images[index]
        del train_labels[index]

    train_label_codes = []
    for label in train_labels:
        train_label_codes.append(classification_utils.lookup_code(label))

    testRecognizer.train(train_images, numpy.array(train_label_codes))
    testRecognizer.save("recognizer.dat")

    num_correct = 0
    for i in range(0, len(test_images)):
        predicted_label_code, confidence = testRecognizer.predict(test_images[i])
        actual_label = test_labels[i]
        predicted_label = classification_utils.lookup_label(predicted_label_code)
        if predicted_label == actual_label:
            num_correct += 1
            print("Correctly predicted with confidence", confidence)
        else:
            print("Incorrectly recognized", actual_label, "as", predicted_label)

    print("Predicted with", float(num_correct)/float(len(test_images)), "accuracy")
Example #22
def main():
    parser = argparse.ArgumentParser()
    lfwDefault = os.path.expanduser(
        "~/openface/data/lfw/dlib.affine.sz:96.OuterEyesAndNose")
    parser.add_argument('--lfwAligned', type=str,
                        default=lfwDefault,
                        help='Location of aligned LFW images')
    parser.add_argument('--networkModel', type=str, help="Path to Torch network model.",
                        default=os.path.join(openfaceModelDir, 'nn4.small2.v1.t7'))
    parser.add_argument('--largeFont', action='store_true')
    parser.add_argument('workDir', type=str,
                        help='The work directory where intermediate files and results are kept.')
    args = parser.parse_args()
    # print(args)

    if args.largeFont:
        font = {'family': 'normal', 'size': 20}
        mpl.rc('font', **font)

    mkdirP(args.workDir)

    print("Getting lfwPpl")
    lfwPplCache = os.path.join(args.workDir, 'lfwPpl.pkl')
    lfwPpl = cacheToFile(lfwPplCache)(getLfwPplSorted)(args.lfwAligned)

    print("Eigenfaces Experiment")
    cls = cv2.createEigenFaceRecognizer()
    cache = os.path.join(args.workDir, 'eigenFacesExp.pkl')
    eigenFacesDf = cacheToFile(cache)(opencvExp)(lfwPpl, cls)

    print("Fisherfaces Experiment")
    cls = cv2.createFisherFaceRecognizer()
    cache = os.path.join(args.workDir, 'fisherFacesExp.pkl')
    fishFacesDf = cacheToFile(cache)(opencvExp)(lfwPpl, cls)

    print("LBPH Experiment")
    cls = cv2.createLBPHFaceRecognizer()
    cache = os.path.join(args.workDir, 'lbphExp.pkl')
    lbphFacesDf = cacheToFile(cache)(opencvExp)(lfwPpl, cls)

    print("OpenFace CPU/SVM Experiment")
    net = openface.TorchNeuralNet(args.networkModel, 96, cuda=False)
    cls = SVC(kernel='linear', C=1)
    cache = os.path.join(args.workDir, 'openface.cpu.svm.pkl')
    openfaceCPUsvmDf = cacheToFile(cache)(openfaceExp)(lfwPpl, net, cls)

    print("OpenFace GPU/SVM Experiment")
    net = openface.TorchNeuralNet(args.networkModel, 96, cuda=True)
    cache = os.path.join(args.workDir, 'openface.gpu.svm.pkl')
    openfaceGPUsvmDf = cacheToFile(cache)(openfaceExp)(lfwPpl, net, cls)

    plotAccuracy(args.workDir, args.largeFont,
                 eigenFacesDf, fishFacesDf, lbphFacesDf,
                 openfaceCPUsvmDf, openfaceGPUsvmDf)
    plotTrainingTime(args.workDir, args.largeFont,
                     eigenFacesDf, fishFacesDf, lbphFacesDf,
                     openfaceCPUsvmDf, openfaceGPUsvmDf)
    plotPredictionTime(args.workDir, args.largeFont,
                       eigenFacesDf, fishFacesDf, lbphFacesDf,
                       openfaceCPUsvmDf, openfaceGPUsvmDf)
Example #23
def trainModel():
	global model
	[X,y] = readImg(SAVE_DIR)
	y = np.asarray(y, dtype=np.int32)
	# model = cv2.createEigenFaceRecognizer()
	model = cv2.createLBPHFaceRecognizer()
	model.train(np.asarray(X), np.asarray(y))
	print 'finish training'
Example #24
    def runCapture(self):
		
        imgdir = self._imagePath
        cam = self._cam
        cascade = self._cascade
         
        faceSize= (90, 90)
        model= cv2.createLBPHFaceRecognizer(threshold=70.0) 
        
                
        images,labels,namess = utils.retrain(imgdir,model,faceSize)
        #print "Nouvel etat:",len(images),"images",len(namess),"personnes"
        
        self.isSomebody = False
                  
        ret, img = cam.read()
                         
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        #check whether this is necessary
        gray = cv2.equalizeHist(gray)
        # face detection
        rects = cascade.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=5, minSize=(40, 40), flags=cv2.cv.CV_HAAR_SCALE_IMAGE) #flags = cv2.CASCADE_SCALE_IMAGE     

        # keep only the image region of interest (the face)
        roi = None
        if len(rects)>0:
            (x, y, w, h) = rects[0]
            # crop & resize it 
            roi = cv2.resize(gray[y:y+h, x:x+w], faceSize)
            #compute the rectangle's center coordinates and display them
            xCentre1=(2*x+w)/2
            yCentre1=(2*y+h)/2  
            xCentre=str(xCentre1)
            yCentre=str(yCentre1)
            centre="("+xCentre+":"+yCentre+")"

            cv2.rectangle(img, (x,y),(x+w,y+h), (0,255,0),2)

            if len(images)>0:
                
                [p_label, p_confidence] = model.predict(np.asarray(roi))
                name="unknown"
                if p_label !=-1 :
                    name = namess[p_label]
                    self.isSomebody = True
                cv2.putText( img, "%s %.2f %.2f" % (name,p_confidence,p_label),(x+10,y+20), cv2.FONT_HERSHEY_PLAIN,1.5, (0,255,0))
                self.name = name
                self.xPosition = xCentre1
                self.yPosition = yCentre1

        while True:

            cv2.imshow('Face Recognition For Poppy', img)

            k = cv2.waitKey(5) & 0xFF
        
            # quit with Escape
            if k == 27: break
Example #25
 def loadMyFaceBdd(self): 
     if (os.path.isfile(self.bddDirPath+'/'+self.getListeFilename())):
         print '[myFaceBdd] Loading the list of labels'
         self.loadListeLabels()
         print '[myFaceBdd] Loading the opencv database'
         self.loadOpencvBdd()
         self.istrainOK = True
     else:
         self.recognizer = cv2.createLBPHFaceRecognizer()
Example #26
 def recognizer(self):
   """
   Creates new FaceRecognizer using Local Binary Patterns (LBP)
   Returns current recognizer object
   """
   self.current_recognizer = cv2.createLBPHFaceRecognizer()
   self.current_recognizer.train(self.faces, np.array(self.index))
   
   return self.current_recognizer
Example #27
    def load(self, db_file_name):
        """
        Update the face models data structure from a file.

        :type  db_file_name: string
        :param db_file_name: the name of the file containing
                             the dump of the face models data structure

        :rtype: boolean
        :returns: True if loading was successful
        """
        if db_file_name is None:
            '''
            Set the name of database.
            Algorithm :
            LBP (Local Binary Pattern)
            '''
            db_file_name = self._db_name + '-' + self._algorithm

        tags_file_name = db_file_name + '-Tags'

        algorithm = ce.FACE_MODEL_ALGORITHM
        
        if self._params is not None:
            
            algorithm = self._params[ce.FACE_MODEL_ALGORITHM_KEY]
        
        model = None
        
        if algorithm == 'Eigenfaces':
            
            model = cv2.createEigenFaceRecognizer()
        
        elif algorithm == 'Fisherfaces':
            
            model = cv2.createFisherFaceRecognizer()
            
        elif algorithm == 'LBP':
            
            model = cv2.createLBPHFaceRecognizer()
        
        ok = False

        if os.path.isfile(db_file_name) and (os.path.isfile(tags_file_name)):

            if(not((ce.USE_TRACKING or ce.SIM_TRACKING or ce.USE_SLIDING_WINDOW)
                   and ce.LOAD_IND_FRAMES_RESULTS)):
                model.load(db_file_name)

            if not(model is None):
                self.model = model
                self._tags = utils.load_YAML_file(tags_file_name)
                ok = True
                print('\n### DB LOADED ###\n')

        return ok
Example #28
def train_user_images(user):
    target = []
    labels = []
    for training_image in user.trainingimage_set.all():
        target.append(read_image(training_image.image.file))
        labels.append(0)

    model = cv2.createLBPHFaceRecognizer()
    model.train(numpy.asarray(target), numpy.asarray(labels))
    model.save(settings.MODEL_IMAGES_ROOT + user.username + '.yml')
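
The saved per-user model can be loaded back for a verification pass; a minimal sketch, reusing the snippet's read_image helper and settings constant (probe_file is a hypothetical image to check):

model = cv2.createLBPHFaceRecognizer()
model.load(settings.MODEL_IMAGES_ROOT + user.username + '.yml')
label, confidence = model.predict(read_image(probe_file))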
Example #29
    def load(model_path):
        # load the face recognition instance and construct the OpenCV face recognizer
        fr_path = model_path.replace('.model', '.cpikle')
        fri = cPickle.loads(open(fr_path).read())
        recognizer = cv2.createLBPHFaceRecognizer()
        recognizer.load(model_path)

        # construct and return the face recognizer
        return FaceRecognizer(recognizer, trained=fri.trained, labels=fri.labels)
Example #30
	def __init__(self, algorithm='fisherface'):
		self.algorithm = algorithm
		if (algorithm == 'fisherface'):
			self.model = cv2.createFisherFaceRecognizer()
		elif (algorithm == 'eigenface'):
			self.model = cv2.createEigenFaceRecognizer()
		elif (algorithm == 'lbphf'):
			self.model = cv2.createLBPHFaceRecognizer()
		else:
			raise ValueError('Unknown algorithm: "%s"!' % (algorithm,))
Example #31
    haarcascade_eye_tree_eyeglasses.xml   haarcascade_mcs_leftear.xml
    haarcascade_eye.xml                   haarcascade_mcs_lefteye.xml
    haarcascade_frontalface_alt2.xml      haarcascade_mcs_mouth.xml
    haarcascade_frontalface_alt_tree.xml  haarcascade_mcs_nose.xml
    haarcascade_frontalface_alt.xml       haarcascade_mcs_rightear.xml
    haarcascade_frontalface_default.xml   haarcascade_mcs_righteye.xml
    haarcascade_fullbody.xml              haarcascade_mcs_upperbody.xml
    haarcascade_lefteye_2splits.xml       haarcascade_profileface.xml
    haarcascade_lowerbody.xml             haarcascade_righteye_2splits.xml
    haarcascade_mcs_eyepair_big.xml       haarcascade_smile.xml
    haarcascade_mcs_eyepair_small.xml     haarcascade_upperbody.xml
'''

cascade_path = "haarcascade_frontalface_default.xml"
face_cascade = cv2.CascadeClassifier(cascade_path)
recognizer = cv2.createLBPHFaceRecognizer(1, 8, 8, 8, 123)
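# positional parameters: radius=1, neighbors=8, grid_x=8, grid_y=8, threshold=123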


def get_images():
    path = "media"
    image_folder = os.path.abspath(
        os.path.join(os.path.dirname(__file__), path))
    image_paths = [
        os.path.join(image_folder, f) for f in os.listdir(image_folder)
        if f.endswith('.jpg')
    ]
    images = []
    labels = []
    for image_path in image_paths:
        gray = Image.open(image_path).convert('L')
        image = np.array(gray, 'uint8')
Example #32
 def __init__(self, visitor_faces):
     self.visitors = visitor_faces
     self.model = cv2.createLBPHFaceRecognizer(
         threshold=LBP_RECOGNITION_THRESHOLD)
     self.face_cascade = cv2.CascadeClassifier(CASCADE_FILE)
Example #33
def LBPHupdate(ID):
	labels=[]
	images=[]
	# make sure this is the right file name
	faceCascade = cv2.CascadeClassifier(cascadePath)
	
	counter=0
	#counter2=0
	foldername = ID

	#update database
	print 'Loading training data...'
	model=cv2.createLBPHFaceRecognizer()
	model.load(TRAINING_FILE)
	print 'Training data loaded!'

	f=open(CSV_FILE,'r+')
	t=open(LOOKUP_FILE,'r+')
	en=open(ENROLLMENT_FILE,'r+')
	#Get label
	f.seek(-10,2)
	s=f.readline()
	#print s
	list=s.split(';')
	num=str(list[1]).split('\n')
	#new label no.
	label=int(num[0])+1
	#print label

	f.seek(0,2)
	t.seek(0,2)
	en.seek(0,2)

	faces=[]
	labels=[]

	DIRECTORY=foldername
	#print DIRECTORY

	SEPARATOR=";"

	for files in os.listdir(DIRECTORY):
	    abs_path="%s\%s"%(DIRECTORY,files)
	    seq=''.join([str(abs_path),str(SEPARATOR),str(label),'\n'])
	    f.write(seq)
	    
	t.write(''.join([str(DIRECTORY),';',abs_path,';\n']));

	en.write(''.join([str(label),'\n']))

	f.close()
	t.close()
	en.close()

	for filename in walk_files(DIRECTORY,'*.pgm'):
	    #print filename
	    faces.append(prepare_image(filename))
	    labels.append(label)

	model.update(np.asarray(faces), np.asarray(labels))
	#print model

	#Save model results
	model.save(TRAINING_FILE)
	print 'Training data saved to',TRAINING_FILE

	print "successfully updated"
Example #34
def Authenticate():
	#load lookup table
	tableName=LOOKUP_FILE
	table=[]
	samples=[]
	load_table(tableName,table,samples)

	# Create window
	cv2.namedWindow("Preview")
	#cv2.namedWindow("Compared")

	# Load training data into model
	print 'Loading training data...'
	model = cv2.createLBPHFaceRecognizer()
	model.load(TRAINING_FILE)
	print 'Training data loaded!'

	confidences=[]
	labels=[]

	camera=PiCamera()
	camera.resolution=(320,240)
	camera.framerate=32
	rawCapture=PiRGBArray(camera,size=(320,240))
	time.sleep(3)

	count=30
	recognition=0
	
	print 'Looking for face...'
	camera.capture(rawCapture,format="bgr",use_video_port=True)
	while rawCapture is not None:
		image=rawCapture.array
		gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
		result=face.detect_single(gray)
		cv2.imshow("Preview",image)
		key=cv2.waitKey(1)
		if result is None:
			print "Please face to the camera "
		else:
			x, y, w, h = result
			# Crop and resize image to face
			crop = face.resize(face.crop(gray, x, y, w, h))
			label, confidence = model.predict(crop)
			confidences.append(confidence)
			labels.append(label)
			cv2.waitKey(1)
			count -= 1
		if count<=0:
			break
		rawCapture.truncate(0)
		camera.capture(rawCapture,format="bgr",use_video_port=True)
		
	print "finish capturing faces"
	camera.close()
	cv2.destroyWindow("Preview")


	temp=[]
	i=0
	length=len(labels)
	while length>0:
		if i==0:
			temp.append(labels[length-1])
			i += 1
			length -= 1
		else:
			tempi=0
			while tempi<i:
				if labels[length-1]!=temp[tempi]:
					tempi += 1
				else:
					length -=1
					break
			if tempi == i:
				temp.append(labels[length-1])
				i += 1
			length -= 1

	print "------LABELS:{}".format(labels)
	print "------DIFFERENT LABELS:{}".format(temp)
	print "------NUMBER OF DIFFERENT LABELS:{}".format(i)

	tempi=0
	numoflabel=0
	if i > 5:
		print "could not enter"
		#print labels
		return 0,-1
	else:
		element=temp[tempi]
		while tempi < i:
			tempj=0
			count=0
			while tempj<len(labels):
				if labels[tempj]==temp[tempi]:
					count += 1
				tempj += 1
			if count > numoflabel :
				numoflabel=count
				element=temp[tempi]
			tempi += 1
		print "element is {}, numoflabel is {}".format(element, numoflabel)


	tempi = 0
	con=0
	while tempi < len(labels):
		if labels[tempi]==element:
			con=con+confidences[tempi]
		tempi += 1
	ave=con/numoflabel

	print "mean of confidences is {}".format(ave)
	print confidences

	# print recognition
	f=open(ENROLLMENT_FILE,'r')
	s=f.readline()
	flag=0
	while s!="":
		index=int(s)
		#print index
		if index==element:
			flag=1
			print "flag TRUE"
			break
		s=f.readline()
	if ave < 50 and flag==1:
		print "authenticated"
		return 1,element
	else:
		print "could not enter"
		return 0,-1
Example #35
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import numpy as np
#import cv2.face
import glob
import random
import sys
import cv2

emotions = ["neutrality", "anger", "fear", "happiness", "sadness", "surprise"]

fishface = cv2.createFisherFaceRecognizer()
eigenface = cv2.createEigenFaceRecognizer()
LBPface = cv2.createLBPHFaceRecognizer()
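# note: in OpenCV 3.x these constructors moved into the cv2.face module
# (hence the commented-out "import cv2.face" above)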


def detect_target_faces():
    flag = 0
    face_img = cv2.imread('face.jpg')
    face_gray = cv2.cvtColor(face_img, cv2.COLOR_BGR2GRAY)
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    face_gray = clahe.apply(face_gray)
    faceDet = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
    faceDet2 = cv2.CascadeClassifier("haarcascade_frontalface_alt2.xml")
    faceDet3 = cv2.CascadeClassifier("haarcascade_frontalface_alt.xml")
    faceDet4 = cv2.CascadeClassifier("haarcascade_frontalface_alt_tree.xml")
    face = faceDet.detectMultiScale(face_gray,
                                    scaleFactor=1.1,
                                    minNeighbors=10,
                                    minSize=(5, 5),
Example #36
from os import listdir
from os.path import isfile,join
import numpy as np
import cv2

data_path = 'D:\\opencv2\\project_code\\code\\DataSet\\'
onlyfiles = [f for f in listdir(data_path) if isfile(join(data_path,f))]

Training_Data,Labels = [],[]

for i,files in enumerate(onlyfiles):
    image_path = data_path  + onlyfiles[i]
    images = cv2.imread(image_path,cv2.IMREAD_GRAYSCALE)
    Training_Data.append(np.asarray(images,dtype = np.uint8))
    Labels.append(i)

Labels = np.asarray(Labels,dtype = np.int32)

model = cv2.createLBPHFaceRecognizer()
model.train(np.asarray(Training_Data),np.asarray(Labels))
print "Model training Done !!"

face_classifier = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
def face_detector(img,size=0.5):
    gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    faces = face_classifier.detectMultiScale(gray,1.3,5,0)

    if len(faces) == 0:
        return img,[]

    for (x,y,w,h) in faces:
        cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
        roi = img[y:y+h,x:x+w]
        roi = cv2.resize(roi,(200,200))
Example #37
class Detector:

    # Instances of the other classes
    dbHelper = databaseHelper.DbHelper()
    trainning = trainer.Trainer()

    # Cascade model used for detection
    haarCascade = '../haarcascade/haarcascade_frontalface_alt.xml'
    faceDetect = cv2.CascadeClassifier(haarCascade)

    # Set up the camera
    camera = cv2.VideoCapture(0)

    # Set up the recognizer (radius, neighbors, grid_x, grid_y, threshold)
    recognizer = cv2.createLBPHFaceRecognizer(1, 8, 8, 8, 100)

    # Fonts
    textFont = cv2.cv.InitFont(cv2.cv.CV_FONT_HERSHEY_COMPLEX_SMALL, 1.5, 1, 0,
                               2)
    controlsFont = cv2.FONT_HERSHEY_SIMPLEX

    # Sample properties
    userIdSample = 0
    userNameSample = ''
    sampleId = 0

    # Flag that turns capture mode on
    captureMode = False

    # wxPython (GUI)
    dialog = wx.App()

    # Load the default training data
    recognizer.load("trainning\\data.yml")

    # Dialog asking for the user's name
    def getUserName(parent=None):
        dlg = wx.TextEntryDialog(parent, 'What is your name?')
        dlg.ShowModal()

        result = dlg.GetValue()

        dlg.Destroy()

        return result

    while (True):
        frame, image = camera.read()
        grayConvert = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        faces = faceDetect.detectMultiScale(grayConvert, 1.3, 5)

        for (x, y, w, h) in faces:
            userId, conf = recognizer.predict(grayConvert[y:y + h, x:x + w])
            userName = dbHelper.getNameById(userId)

            if (userId == -1 or not (userName)):
                userName = "******"

            # Rectangle and user name
            cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
            cv2.cv.PutText(cv2.cv.fromarray(image), userName, (x, y + h),
                           textFont, (0, 0, 255))

            # Start capturing
            if (captureMode == True):
                sampleId = sampleId + 1
                cv2.imwrite(
                    'samples/' + userNameSample + '.' + str(userIdSample) +
                    '.' + str(sampleId) + '.jpg', grayConvert[y:y + h,
                                                              x:x + w])
                cv2.putText(image, 'Capturing...', (420, 30), controlsFont, 1,
                            (255, 255, 255), 2, cv2.CV_AA)
                cv2.waitKey(10)

        # Finish capturing and retrain
        if (sampleId > 20):
            sampleId = 0
            captureMode = False
            trainning.train()

            recognizer = cv2.createLBPHFaceRecognizer(1, 8, 8, 8, 100)
            recognizer.load("trainning\\data.yml")

        # Remaining on-screen text
        cv2.putText(image, 'Faculdade Ateneu', (10, 30), controlsFont, 1,
                    (255, 255, 255), 2, cv2.CV_AA)
        cv2.putText(image, 'Controls:', (10, 350), controlsFont, 1,
                    (255, 255, 255), 2, cv2.CV_AA)
        cv2.putText(image, 'N -> New face', (20, 390), controlsFont, 0.7,
                    (255, 255, 255), 2, cv2.CV_AA)
        cv2.putText(image, 'Q -> Quit', (20, 430), controlsFont, 0.7,
                    (255, 255, 255), 2, cv2.CV_AA)
        cv2.putText(image, 'Creators: David and Jadson', (420, 450),
                    controlsFont, 0.5, (255, 255, 255), 1, cv2.CV_AA)
        cv2.putText(image, 'Version: 1.0.0', (420, 470), controlsFont, 0.5,
                    (255, 255, 255), 1, cv2.CV_AA)

        # Display the frame
        cv2.imshow("Face Detector", image)

        # Exit the application
        if (cv2.waitKey(1) == ord('q')):
            break

        # New capture
        if (cv2.waitKey(1) == ord('n')):
            dialog.MainLoop()
            userNameSample = getUserName()

            if (userNameSample):
                captureMode = True

                dbHelper.firstOrUpdate(userNameSample)

                userIdSample = dbHelper.getIdByName(userNameSample)

    camera.release()
    cv2.destroyAllWindows()
Example #38
def main_loop(csv_file, use_ff=False):
    # Open the cascade file and create the classifier
    cascade_file = 'cascades/lbpcascade_frontalface.xml'
    lbp_cascade = cv2.CascadeClassifier(cascade_file)

    # Parse the CSV file
    images_gray, labels, names = parse_csv(csv_file)

    # Create the appropriate model for recognition
    if use_ff:
        model = cv2.createFisherFaceRecognizer()
    else:
        model = cv2.createLBPHFaceRecognizer(
            threshold=LBP_RECOGNITION_THRESHOLD)

    # Train the model with the gray-scale images and the labels
    model.train(images_gray, labels)

    # Open the default capture device (webcam)
    capture = cv2.VideoCapture(-1)

    while True:
        _, frame = capture.read()

        key = 0xFF & cv2.waitKey(10)
        if key == 27:
            break

        elif key == 13 or key == 10:
            # Convert the captured frame to grayscale
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

            # Try to detect faces
            faces = lbp_cascade.detectMultiScale(gray, minSize=(30, 30))

            for (x, y, w, h) in faces:
                # Crop to the face Region of Interest
                face_roi = gray[y:y + h, x:x + w]

                # Only for fisher faces and eigen faces resize the face
                face_resized = cv2.resize(face_roi, (92, 112), 1, 1,
                                          cv2.INTER_CUBIC)
                if use_ff:
                    # face_resized = cv2.resize(face_roi, (92, 112), 1, 1, cv2.INTER_CUBIC)
                    prediction, confidence = model.predict(face_resized)
                else:
                    # prediction, confidence = model.predict(face_roi)
                    prediction, confidence = model.predict(face_resized)

                name = "Desconocido" if prediction < 0 else names[prediction]

                # Draw the predicted name and confidence above the face
                text = "Prediction = %s Confidence = %s" % (name, confidence)
                cv2.putText(frame, text, (x - 15, y - 15),
                            cv2.FONT_HERSHEY_PLAIN, 1.0, BLUE, 2)

                # Draw a rectangle around the face
                cv2.rectangle(frame, (x, y), (x + w, y + h), GREEN, 1)

                cv2.imshow('Processed face', face_resized)

            cv2.imshow('Face', frame)

        cv2.imshow('LBPTest', frame)
Example #39
import numpy as np
import cv2

face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')

eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')

cap = cv2.VideoCapture(0)
rec = cv2.createLBPHFaceRecognizer(2, 130)
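# positional parameters here: radius=2, neighbors=130 (an unusually large neighbor count)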
rec.load("recognizer\\trainingData.yml")
id = 0
font = cv2.cv.InitFont(cv2.cv.CV_FONT_HERSHEY_COMPLEX_SMALL, 3, 1, 0, 3)

while 1:
    ret, img = cap.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)

    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
        id, conf = rec.predict(gray[y:y + h, x:x + w])
        if (id == 1):
            id = "Grish"
        elif (id == 2):
            id = "UnknowN"
        else:
            id = "unknown"
        cv2.cv.PutText(cv2.cv.fromarray(img), str(id), (x, y + h), font, 255)
        roi_gray = gray[y:y + h, x:x + w]
        roi_color = img[y:y + h, x:x + w]
Example #40
# For live video from webcams
import cv2
import os
import numpy as np
import sys
recognizer=cv2.createLBPHFaceRecognizer()
recognizer.load("TrainData.yml")# loading Trained Data
faceCascade=cv2.CascadeClassifier("haarcascade_frontalface_default.xml");

#test = cv2.imread(imageName, cv2.IMREAD_GRAYSCALE)  # leftover still-image variant; imageName is undefined here
#faces = faceCascade.detectMultiScale(test)
#font=cv2.cv.InitFont(cv2.cv.CV_FONT_HERSHEY_COMPLEX_SMALL,2,1,0,2)
i=0
lab=[]
cap=cv2.VideoCapture(0)#Video cam id

while(True):
	ret, frame = cap.read()
	test = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
	faces = faceCascade.detectMultiScale(test)
	for (x, y, w, h) in faces:
		if w > 80 and h > 80:
			predicted_label, confidence = recognizer.predict(test[y:y+h, x:x+w])
			if confidence < 100:
				lab.append(predicted_label)
			#cv2.rectangle(test,(x,y),(x+w,y+h),(255,255,255),2)
			#cv2.cv.PutText(cv2.cv.fromarray(test),result,(x,y+h),font,255)
	if cv2.waitKey(1) & 0xFF == ord('q'):  # press 'q' to stop collecting labels
		break
lab.sort()
lines=[]
file=open("FindLabel.txt","r+")# correspoinding names for the labels is saved in FindLabel.txt
Example #41
from imutils import encodings
import numpy as np
import argparse
import random
import glob
import cv2

# construct the argument parse and parse command line arguments
ap = argparse.ArgumentParser()
ap.add_argument("-s", "--selfies", required=True, help="path to the selfies directory")
ap.add_argument("-c", "--classifier", required=True, help="path to the output classifier directory")
ap.add_argument("-n", "--sample-size", type=int, default=100, help="maximum sample size for each face")
args = vars(ap.parse_args())

# initialize the face recognizer and the list of labels
fr = FaceRecognizer(cv2.createLBPHFaceRecognizer(radius=1, neighbors=8, grid_x=8, grid_y=8))
labels = []

# loop over the input faces for training
for (i, path) in enumerate(glob.glob(args["selfies"] + "/*.txt")):
	# extract the person from the file name,
	name = path[path.rfind("/") + 1:].replace(".txt", "")
	print("[INFO] training on '{}'".format(name))

	# load the faces file, sample it, and initialize the list of faces
	sample = open(path).read().strip().split("\n")
	sample = random.sample(sample, min(len(sample), args["sample_size"]))
	faces = []

	# loop over the faces in the sample
	for face in sample:
Example #42
    def __init__(self, script_path):
        shutil.rmtree(
            '/home/ros/robocup/src/beginner_tutorials/launch/dataset')
        shutil.rmtree('/home/ros/robocup/src/beginner_tutorials/launch/person')
        shutil.rmtree(
            '/home/ros/robocup/src/beginner_tutorials/launch/recognizer')
        os.mkdir('/home/ros/robocup/src/beginner_tutorials/launch/dataset')
        os.mkdir('/home/ros/robocup/src/beginner_tutorials/launch/person')
        rospy.loginfo("In the __init__()")
        #initialize the node.
        rospy.init_node('who_is_who')  # node names cannot contain spaces
        rospy.on_shutdown(self.cleanup)
        #set the voice
        self.voice = rospy.get_param("~Voice", "voice_don_diphone")
        self.wavepath = rospy.get_param("~wavepath",
                                        script_path + "/../sounds")
        self.soundhandle = SoundClient()
        self.exit_point = PoseStamped()
        rospy.sleep(1)
        self.soundhandle.stopAll()
        rospy.sleep(3)
        self.sub = rospy.Subscriber('/start', String, self.getin)
        self.pub = rospy.Publisher("/cmd_vel", Twist, queue_size=10)
        self.sub2 = rospy.Subscriber('/odom', Odometry, self.get_x)
        #send the sign of ready.
        self.soundhandle.say("I am ready", self.voice)
        rospy.loginfo("Say one of the navigation commands...")
        #All subscribers and publishers are defined here
        rospy.Subscriber('/recognizer/output', String, self.talkback)
        self.sub_begin = rospy.Subscriber("begin", Int16, self.callback_begin)
        self.voice_vel_pub = rospy.Publisher('who_voice', String, queue_size=5)
        self.point_pub = rospy.Publisher('move_base_simple/goal',
                                         PoseStamped,
                                         queue_size=5)
        self.peopleposepub = rospy.Publisher("people_pose_info", PeoplePose)
        self.x = 0
        #the word dictionary
        #need to edit
        self.keywords_to_command = {
            'water': ['water'],
            'coffee': ['coffee'],
            'red bull': ['red bull', 'red', 'bull'],
            'cola': ['cola'],
            'paper': ['paper'],
            'michael': ['michael'],
            'jack': ['jack'],
            'fisher': ['fisher'],
            'kevin': ['kevin'],
            'daniel': ['daniel'],
            'yes': ['yes'],
            'no': ['no']
        }
        self.srcfile = '/home/ros/robocup/src/beginner_tutorials/launch/WhoIsWho_yaml/trainningdata.yml'
        self.dstfile = '/home/ros/robocup/src/beginner_tutorials/launch/recognizer/trainningdata.yml'
        self.copyfile()
        self.name = [[0 for i in range(2)] for i in range(6)]
        self.position = [[0 for i in range(3)] for i in range(6)]

        self.flag_begin = 0
        self.people_num = 0
        self.tempname = ''
        self.talk_flag = 0
        self.frame = None
        self.array = None
        self.face_rects = None
        self.face_rects_judge = None
        self.scaling_factor = 1.0
        self.num = 0
        self.i = 0
        self.a = 0
        self.end = 450
        self.flag_face_detect = 0
        self.recognize = 0

        self.recognizer = cv2.createLBPHFaceRecognizer()
        self.face_cascade = cv2.CascadeClassifier(
            '/usr/share/opencv/haarcascades/haarcascade_frontalface_alt.xml')
        self.begin = time.time()

        self.quaternion_get_into_the_door = [0, 0, 0.016557, -0.999863]
        self.quaternion_get_into_the_room = [0, 0, 0.003117, -0.999995]
        self.pos_get_into_the_door = [11.18065, 5.967353, 0]
        self.pos_get_into_the_room = [12.8934, 3.3162, 0]
        self.euler_point = [11.9269, 3.2924, 0]
        self.quaternion_go_to_the_exit = [0, 0, 0.999756, -0.022053]  # adjusted
        self.pos_go_to_the_exit = [10.616634, 0.949925, 0]  # adjusted

        #initialize move_base
        self.move_base_client = actionlib.SimpleActionClient(
            'move_base', MoveBaseAction)
        connected_before_timeout = self.move_base_client.wait_for_server(
            rospy.Duration(2.0))
        if connected_before_timeout:
            rospy.loginfo('succeeded connecting to move_base server')
        else:
            rospy.logerr('failed connecting to move_base server')
            return
        rospy.wait_for_service('move_base/clear_costmaps', 5.0)
        self.service_clear_costmap_client = rospy.ServiceProxy(
            'move_base/clear_costmaps', Empty)
        rospy.loginfo('connected to move_base/clear_costmaps')
        rospy.loginfo('node initialized')
Example #43
import cv2  # OpenCV Library
import lib.face as face
import lib.config as config
import time
import os

# Load training data into model
print('Loading training data...')

if config.RECOGNITION_ALGORITHM == 1:
    print "ALGORITHM: LBPH"
    model = cv2.createLBPHFaceRecognizer(threshold=config.POSITIVE_THRESHOLD)
elif config.RECOGNITION_ALGORITHM == 2:
    print "ALGORITHM: Fisher"
    model = cv2.createFisherFaceRecognizer(threshold=config.POSITIVE_THRESHOLD)
else:
    print "ALGORITHM: Eigen"
    model = cv2.createEigenFaceRecognizer(threshold=config.POSITIVE_THRESHOLD)

model.load("training.xml")
print('Training data loaded!')

camera = config.get_camera()

time.sleep(1)  # give the camera a second to warm up
while True:
    # camera video feed
Example #44
    reference_faces_paths = sorted(
        glob.glob(args.reference_faces_path + "*.pgm"))
    for path in reference_faces_paths:
        index = int(path[path.rfind('/') + 1:path.rfind('.')])
        reference_faces_indices.append(index)
        face_img = cv2.imread(path)
        reference_faces_images.append(face_img)
    reference_faces = dict(zip(reference_faces_indices,
                               reference_faces_images))

if args.facerec_method == "eigenfaces":
    face_recognizer = cv2.createEigenFaceRecognizer(0, args.facerec_threshold)
elif args.facerec_method == "fisherfaces":
    face_recognizer = cv2.createFisherFaceRecognizer(0, args.facerec_threshold)
elif args.facerec_method == "lbph":
    face_recognizer = cv2.createLBPHFaceRecognizer(1, 8, 8, 8,
                                                   args.facerec_threshold)
else:
    pass
try:
    face_recognizer.load(args.facerec_model_path)
except:
    print "Something went wrong face recognizer model, exiting..."
    sys.exit(1)

train_face_size = (96, 96)

indexes = [
    1, 3, 4, 5, 6, 7, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
    23, 24, 25, 26, 27
]
ok = 0