Example #1
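
# The example below assumes imports along these lines (module names inferred
# from the calls that follow; BaseComponent comes from the surrounding project
# and is not shown here):
#
#   import csv
#   import json
#   import os
#
#   import cv2
#   import cv2.face as face
#   import numpy as np
#   from sklearn.model_selection import train_test_split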
    def __init__(self, cfg):
        BaseComponent.__init__(self, cfg)

        params = cfg['params']

        models_dir = params['model']
        if not os.path.exists(models_dir):
            raise "Error: Invalid face recognizer model directory path " + models_dir

        strategies = params['strategies']
        if not strategies:
            raise "Error: Invalid pipeline file. Recognizer should specify atleast 1 strategy: eigen|fischer|lbp"

        self.output_label = params['outputlabel']

        if 'eigen' in strategies:
            self.eigen = face.createEigenFaceRecognizer()
            self.eigen.load(os.path.join(models_dir, 'eigen.yml'))
        else:
            if 'eigen' in self.output_label:
                raise "Error: Invalid pipeline file. Recognizer has eigen in output label but not in strategies"

            self.eigen = None

        if 'fischer' in strategies:
            self.fischer = face.createFisherFaceRecognizer()
            self.fischer.load(os.path.join(models_dir, 'fischer.yml'))
        else:
            if 'fischer' in self.output_label:
                raise "Error: Invalid pipeline file. Recognizer has fischer in output label but not in strategies"

            self.fischer = None

        if 'lbp' in strategies:
            self.lbp = face.createLBPHFaceRecognizer()
            self.lbp.load(os.path.join(models_dir, 'lbp.yml'))
        else:
            if 'lbp' in self.output_label:
                raise "Error: Invalid pipeline file. Recognizer has lbp in output label but not in strategies"

            self.lbp = None

        with open(os.path.join(models_dir, 'model.json'), 'r') as model_file:
            self.model = json.load(model_file)
            self.train_img_size = (self.model['height'], self.model['width'])
            self.labels = self.model['labels']

        self.equalize_hist = params.get('equalizehist', False)
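
# A minimal sketch of the cfg dict the constructor above expects. The key names
# ('params', 'model', 'strategies', 'outputlabel', 'equalizehist') come from the
# code itself; the directory path and the chosen values are hypothetical.
example_cfg = {
    'params': {
        'model': 'models/faces',         # directory holding eigen.yml, fischer.yml, lbp.yml and model.json
        'strategies': ['eigen', 'lbp'],  # any subset of: eigen | fischer | lbp
        'outputlabel': 'eigen',          # strategies named here must also appear in 'strategies'
        'equalizehist': True,            # optional; defaults to False
    }
}
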
def recognize(img_file,
              expected_label,
              models_dir,
              eigen=True,
              fischer=True,
              lbp=True,
              equalize_hist=False):

    eigen_label = fischer_label = lbp_label = -1

    with open(os.path.join(models_dir, 'model.json'), 'r') as model_file:
        model = json.load(model_file)
        train_img_size = (model['height'], model['width'])

    img = cv2.imread(img_file, cv2.IMREAD_GRAYSCALE)
    # If training images were equalized, better to perform the same
    # operation during recognition too.
    if equalize_hist:
        img = cv2.equalizeHist(img)

    if img.shape != train_img_size:
        img = cv2.resize(img, train_img_size[::-1])

    if eigen:
        eigen_recog = face.createEigenFaceRecognizer()
        eigen_recog.load(os.path.join(models_dir, 'eigen.yml'))
        eigen_label, eigen_conf = eigen_recog.predict(img)  # predict() returns (label, confidence)
        print('Eigen done')

    if fischer:
        fischer_recog = face.createFisherFaceRecognizer()
        fischer_recog.load(os.path.join(models_dir, 'fischer.yml'))
        fischer_label, fischer_conf = fischer_recog.predict(img)
        print('Fischer done')

    if lbp:
        lbp_recog = face.createLBPHFaceRecognizer()
        lbp_recog.load(os.path.join(models_dir, 'lbp.yml'))
        lbp_label, lbp_conf = lbp_recog.predict(img)
        print('LBP done')

    print(eigen_label, fischer_label, lbp_label)
    return eigen_label, fischer_label, lbp_label
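
# A minimal usage sketch for recognize() as defined above; the image path,
# expected label and models directory are hypothetical placeholders.
eigen_label, fischer_label, lbp_label = recognize(
    'data/test/person1_01.png',
    expected_label=3,            # accepted by the signature but not used in the body
    models_dir='models/faces',
    eigen=True, fischer=False, lbp=True,
    equalize_hist=False)
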
def recognizemany(img_file,
                  detector_xml_path,
                  models_dir,
                  dest_img_file,
                  eigen=True,
                  fischer=True,
                  lbp=True,
                  equalize_hist=False):

    img = cv2.imread(img_file)

    gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    #gray_img = cv2.resize(gray_img, (640, 480))

    detector = cv2.CascadeClassifier(detector_xml_path)

    min_size = (min(50,
                    gray_img.shape[0] // 10), min(50, gray_img.shape[1] // 10))
    #min_size = (0,0)
    hits = detector.detectMultiScale(gray_img, scaleFactor=1.1, minNeighbors=3,
                                     flags=0, minSize=min_size)

    eigen_label = fischer_label = lbp_label = -1

    with open(os.path.join(models_dir, 'model.json'), 'r') as model_file:
        model = json.load(model_file)
        train_img_size = (model['height'], model['width'])
        labels = model['labels']

    print('# hits:', len(hits))

    hits_img = np.copy(img)

    # If training images were equalized, better to perform the same
    # operation during recognition too.
    if equalize_hist:
        gray_img = cv2.equalizeHist(gray_img)

    for i, (x, y, w, h) in enumerate(hits, start=1):
        print('ROI ', i)
        roi = gray_img[y:y + h, x:x + w]

        if roi.shape != train_img_size:
            roi = cv2.resize(roi, train_img_size[::-1])

        if eigen:
            eigen_recog = face.createEigenFaceRecognizer()
            eigen_recog.load(os.path.join(models_dir, 'eigen.yml'))
            eigen_label, eigen_conf = eigen_recog.predict(roi)  # predict() returns (label, confidence)
            print('Eigen done')

        if fischer:
            fischer_recog = face.createFisherFaceRecognizer()
            fischer_recog.load(os.path.join(models_dir, 'fischer.yml'))
            fischer_label, fischer_conf = fischer_recog.predict(roi)
            print('Fischer done')

        if lbp:
            lbp_recog = face.createLBPHFaceRecognizer()
            lbp_recog.load(os.path.join(models_dir, 'lbp.yml'))
            lbp_label, lbp_conf = lbp_recog.predict(roi)
            print('LBP done')

        cv2.rectangle(hits_img, (x, y), (x + w, y + h), (255, 255, 255), 2)
        cv2.putText(hits_img, labels[str(fischer_label)], (x, y - 5),
                    cv2.FONT_HERSHEY_PLAIN, 2.0, (255, 255, 255), 2)

        print(labels[str(eigen_label)], labels[str(fischer_label)],
              labels[str(lbp_label)])
        #return  eigen_label, fischer_label, lbp_label

    cv2.imwrite(dest_img_file, hits_img)
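
# A minimal usage sketch for recognizemany() as defined above; the cascade
# file, image paths and models directory are hypothetical placeholders.
recognizemany(
    'group_photo.jpg',
    detector_xml_path='haarcascade_frontalface_default.xml',
    models_dir='models/faces',
    dest_img_file='group_photo_annotated.jpg',
    eigen=True, fischer=True, lbp=True,
    equalize_hist=False)
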
def test(test_csv, models_dir, eigen=True, fischer=True, lbp=True):

    eigen_label = fischer_label = lbp_label = -1

    if eigen:
        eigen_recog = face.createEigenFaceRecognizer()
        eigen_recog.load(os.path.join(models_dir, 'eigen.yml'))

    if fischer:
        fischer_recog = face.createFisherFaceRecognizer()
        fischer_recog.load(os.path.join(models_dir, 'fischer.yml'))

    if lbp:
        lbp_recog = face.createLBPHFaceRecognizer()
        lbp_recog.load(os.path.join(models_dir, 'lbp.yml'))

    with open(os.path.join(models_dir, 'model.json'), 'r') as model_file:
        model = json.load(model_file)
        train_img_size = (model['height'], model['width'])

    #test_imgfiles = np.genfromtxt(test_csv,  delimiter=',', dtype=None, names=['file','label','labelnum'])

    test_imgfiles = []
    with open(test_csv, 'r', encoding='utf-8', newline='') as csvfile:
        reader = csv.reader(csvfile)
        for row in reader:
            f, label, label_idx = row
            test_imgfiles.append(row)

    eigen_correct = fischer_correct = lbph_correct = 0

    for test_imgfile in test_imgfiles:

        img = cv2.imread(test_imgfile[0], cv2.IMREAD_GRAYSCALE)

        if img.shape != train_img_size:
            img = cv2.resize(img, train_img_size[::-1])

        expected_label = int(test_imgfile[2])

        eigen_label, eigen_conf = eigen_recog.predict(img) if eigen else (-1, 0)
        eigen_correct += 1 if eigen_label == expected_label else 0

        fischer_label, fischer_conf = fischer_recog.predict(img) if fischer else (-1, 0)
        fischer_correct += 1 if fischer_label == expected_label else 0

        lbp_label, lbp_conf = lbp_recog.predict(img) if lbp else (-1, 0)
        lbph_correct += 1 if lbp_label == expected_label else 0

        print("%s: expected=%d | eigen=%d | fischer=%d | lbph=%d\n" %
              (test_imgfile[0], expected_label, eigen_label, fischer_label,
               lbp_label))

    if eigen:
        print("Eigenfaces accuracy: ", eigen_correct / len(test_imgfiles))

    if fischer:
        print("Fischerfaces accuracy: ", fischer_correct / len(test_imgfiles))

    if lbp:
        print("LBPH accuracy: ", lbph_correct / len(test_imgfiles))
def train(csv_file,
          train_percent,
          test_file_csv,
          models_dir,
          eigen=True,
          fischer=True,
          lbp=True):

    # OMG np.genfromtxt is horribly broken when moving from py2 to py3 because it returns byte arrays in py3
    # and nothing else can handle byte arrays properly without other conversion hacks.
    # Whatever happened to the "pythonic" way?! Avoid!
    # data = np.genfromtxt(csv_file,  delimiter=',', dtype=None, names=['file','label','labelnum'])
    data = []
    all_labels = {}
    label_counts = {}
    labelnum_col = []
    with open(csv_file, 'r', encoding='utf-8', newline='') as csvfile:
        reader = csv.reader(csvfile)
        for row in reader:
            f, label, label_idx = row
            labelnum_col.append(label_idx)
            data.append(row)
            if all_labels.get(label_idx) is None:
                all_labels[label_idx] = label
                label_counts[label_idx] = 1
            else:
                label_counts[label_idx] += 1

    # Every label should have at least 2 data points. Delete those rows which
    # don't satisfy that condition.
    data = [d for d in data if label_counts[d[2]] >= 2]
    labelnum_col = [d[2] for d in data]
    train_imagefiles, test_imagefiles = train_test_split(
        data, train_size=train_percent / 100.0, stratify=labelnum_col)

    with open(test_file_csv, 'w', encoding='utf-8') as csvfile:
        testwriter = csv.writer(csvfile, delimiter=',')

        for test_imgfile in test_imagefiles:
            testwriter.writerow(list(test_imgfile))

    training_labels = np.array([d[2] for d in train_imagefiles],
                               dtype=np.int32)

    train_images = []
    for train_imgfile in train_imagefiles:
        f = train_imgfile[0]
        img = cv2.imread(f, cv2.IMREAD_GRAYSCALE)
        print(f, img.shape)
        train_images.append(img)

    if not os.path.exists(models_dir):
        os.makedirs(models_dir)

    print(train_images[0].shape, len(training_labels))

    if eigen:
        eigen_recog = face.createEigenFaceRecognizer()
        eigen_recog.train(train_images, training_labels)
        eigen_recog.save(os.path.join(models_dir, 'eigen.yml'))
        print('Eigen done')

    if fischer:
        fischer_recog = face.createFisherFaceRecognizer()
        fischer_recog.train(train_images, training_labels)
        fischer_recog.save(os.path.join(models_dir, 'fischer.yml'))
        print('Fischer done')

    if lbp:
        lbp_recog = face.createLBPHFaceRecognizer()
        lbp_recog.train(train_images, training_labels)
        lbp_recog.save(os.path.join(models_dir, 'lbp.yml'))
        print('LBP done')

    # Record the training image dimensions because at prediction time we need to resize images
    # to those dimensions.
    model = {
        'width': train_images[0].shape[1],
        'height': train_images[0].shape[0],
        'labels': all_labels
    }
    with open(os.path.join(models_dir, 'model.json'), 'w') as model_file:
        json.dump(model, model_file, indent=4, separators=(',', ': '))
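
# A minimal usage sketch for train() as defined above; the CSV is expected to
# hold file,label,labelnum rows, and the paths are hypothetical placeholders.
train('faces.csv',
      train_percent=80,                       # ~80% of each label used for training
      test_file_csv='models/faces/test.csv',  # the held-out rows are written here
      models_dir='models/faces',
      eigen=True, fischer=True, lbp=True)
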
#!/usr/bin/env python

import argparse
import cv2
import cv2.face as face

if __name__ == '__main__':

    parser = argparse.ArgumentParser(description='Recognize an image against a trained data set (currently EigenFaceRecognizer)')
    parser.add_argument('training_filename', help='The training data generated by 2.train.py')
    parser.add_argument('image_filename', help='The image containing the face to recognize')
    parser.add_argument('-n', '--num-components', type=int, default=10, help='Number of components used in training (default: 10)')
    parser.add_argument('-t', '--threshold', type=float, default=12.5, help='Confidence threshold (default: 12.5)')

    args = parser.parse_args()

    model = face.createEigenFaceRecognizer(args.num_components, args.threshold)
    model.load(args.training_filename)

    # Load the image to recognize. Must be greyscale
    predict_img = cv2.imread(args.image_filename, cv2.IMREAD_GRAYSCALE)

    label, confidence = model.predict(predict_img)

    print("The predicted label was: %d" % label)
    print("The confidence was: %f" % confidence)
                testingdict[csvrow['path']] = (img, int(csvrow['id']))
    elif ratio:
        ## Separate some images, to be used for testing. Do not train on them.
        testsize = int(math.floor(ratio*TOTAL_IMAGES))
        for i in range(testsize):
            randpath = random.choice(trainingdict.keys())
            randimage = trainingdict[randpath][0]
            randid = trainingdict[randpath][1]  
            testingdict[randpath] = (randimage, randid)
            trainingdict.pop(randpath, None)


    ## Train Eigenfaces, by feeding it face data
    trainingimgs, trainingids = zip(*trainingdict.values()) 
    testingimgs, testingids = zip(*testingdict.values()) 
    model = cv2face.createEigenFaceRecognizer()
    model.train(numpy.array(trainingimgs), numpy.array(trainingids))

    ## Test our model and see how accurate we are
    correct = 0
    iscorrect = False
    for path,x in testingdict.iteritems():
        img = x[0]
        actualid = x[1]
        predictionid = model.predict(numpy.array(img))
        if predictionid == actualid: # We got it!
            correct += 1
            iscorrect = True
        else: # Dang it!
            predictionpath = "" # BUG: If no id is found, cause they all went into testing, this will fail.
            for path2,x2 in trainingdict.iteritems():