Example #1
def train1():
    import os
    import cv2
    import dlib
    import glob

    # Train a 68-point landmark predictor

    current_path = os.getcwd()
    faces_path = current_path + '/examples/faces'

    # Training section
    # Parameter settings
    options = dlib.shape_predictor_training_options()
    options.oversampling_amount = 300
    options.nu = 0.05
    options.tree_depth = 2
    options.be_verbose = True

    # Load the labeled XML file
    training_xml_path = os.path.join(faces_path,
                                     "training_with_face_landmarks.xml")
    # Run the training; the trained model will be saved as predictor.dat
    dlib.train_shape_predictor(training_xml_path, "predictor.dat", options)
    # Print the accuracy on the training set
    print("\nTraining accuracy:{0}".format(
        dlib.test_shape_predictor(training_xml_path, "predictor.dat")))

    # Load the test-set XML file
    testing_xml_path = os.path.join(faces_path,
                                    "testing_with_face_landmarks.xml")
    # Print the accuracy on the test set
    print("\nTesting accuracy:{0}".format(
        dlib.test_shape_predictor(testing_xml_path, "predictor.dat")))
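A minimal way to run the function above; it assumes the script sits next to dlib's examples/faces directory, which is the layout the path logic expects:

if __name__ == "__main__":
    train1()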
    def test_model(self, testfolder_path):
        """!
        Tests the trained model.
        @param testfolder_path path+"test.xml"
        """
        print("Testing error (average pixel deviation): {}".format(
            dlib.test_shape_predictor(testfolder_path, self.path_create_model +
                                      "predictor.dat")))
def test_shape_predictor_params(treeDepth, nu, cascadeDepth, featurePoolSize,
                                numTestSplits, oversamplingAmount,
                                oversamplingTransJitter, padding, lambdaParam):
    # Grab the default options for dlib's shape predictor and then set the values
    # based on the current hyperparameter values, casting to ints when appropriate
    options = dlib.shape_predictor_training_options()
    options.tree_depth = int(treeDepth)
    options.nu = nu
    options.cascade_depth = int(cascadeDepth)
    options.feature_pool_size = int(featurePoolSize)
    options.num_test_splits = int(numTestSplits)
    options.oversampling_amount = int(oversamplingAmount)
    options.oversampling_translation_jitter = oversamplingTransJitter
    options.feature_pool_region_padding = padding
    options.lambda_param = lambdaParam

    # Tell dlib to be verbose when training and utilize our supplied number of threads when training
    options.be_verbose = True
    options.num_threads = procs  # 'procs' (thread count) is defined elsewhere in the original script

    # Display the current set of options to our terminal
    print("[INFO] Starting training process...")
    print(options)
    sys.stdout.flush()

    # Train the model using the current set of hyperparameters
    dlib.train_shape_predictor(config.TRAIN_PATH, config.TEMP_MODEL_PATH,
                               options)

    # Take the newly trained shape predictor model and evaluate it on both the training and testing set
    trainingError = dlib.test_shape_predictor(config.TRAIN_PATH,
                                              config.TEMP_MODEL_PATH)
    testingError = dlib.test_shape_predictor(config.TEST_PATH,
                                             config.TEMP_MODEL_PATH)

    # Display the training and testing errors for the current trial
    print("[INFO] train error: {}".format(trainingError))
    print("[INFO] test error: {}".format(testingError))
    sys.stdout.flush()

    # Return the error on the testing set
    return testingError
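This function is shaped for black-box hyperparameter search, so a natural driver is dlib's own global optimizer. A minimal sketch using dlib.find_min_global follows; the bounds and call budget are illustrative assumptions, not values from the snippet:

# Hedged sketch: each bound pairs with one argument of
# test_shape_predictor_params, in order; all values are illustrative.
lower_bounds = [2, 0.001,  4,  100,  20,  1, 0.0, -0.2, 0.01]
upper_bounds = [8, 0.2,   25, 1000, 300, 50, 0.3,  0.2, 0.99]
best_params, best_error = dlib.find_min_global(
    test_shape_predictor_params, lower_bounds, upper_bounds, 25)
print("[INFO] best testing error: {}".format(best_error))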
    def train_model(self, trainfolder_path):
        """!
        Lance l'apprentissage du modèle avec les valeurs par défaut
        @param trainfolder : path+"train.xml"
        """
        # self.parameter_model([500,6],0.6,1,18,700,40,500)

        dlib.train_shape_predictor(trainfolder_path,
                                   self.path_create_model + "predictor.dat",
                                   self.options)

        return "Training error (average pixel deviation): {}".format(
            dlib.test_shape_predictor(
                trainfolder_path, self.path_create_model + "predictor.dat"))
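A hypothetical driver for these two methods; the class name, attribute value, and paths below are assumptions for illustration only:

# model = FaceModel()                   # hypothetical class holding self.options
# model.path_create_model = "./model/"  # assumed output directory
# print(model.train_model("./model/train.xml"))
# model.test_model("./model/test.xml")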
def test_params(cascade_depth, padding, nu, tree_depth,
                num_trees_per_cascade_level, lambda_param, jitter):
    options = dlib.shape_predictor_training_options()
    options.feature_pool_region_padding = padding
    options.cascade_depth = int(cascade_depth)
    options.nu = nu
    options.tree_depth = int(tree_depth)
    options.oversampling_translation_jitter = jitter

    options.num_trees_per_cascade_level = int(num_trees_per_cascade_level)
    options.lambda_param = lambda_param
    options.num_threads = 4
    options.be_verbose = True

    print("start training")
    print(options)
    sys.stdout.flush()
    dlib.train_shape_predictor(training_xml_path, "bbr_predictor.dat", options)
    print("\nTraining error: ",
          dlib.test_shape_predictor(training_xml_path, "bbr_predictor.dat"))
    err = dlib.test_shape_predictor(testing_xml_path, "bbr_predictor.dat")
    print("\nTesting error: ", err)
    sys.stdout.flush()
    return err
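A direct invocation with dlib's stock default values (illustrative only; the function also assumes training_xml_path and testing_xml_path are defined at module level):

err = test_params(cascade_depth=10, padding=0, nu=0.1, tree_depth=4,
                  num_trees_per_cascade_level=500, lambda_param=0.1,
                  jitter=0)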
Example #6
# to a high amount (300) effectively boosts the training set size, so
# that helps this example.
# options.oversampling_amount = 300
# I'm also reducing the capacity of the model by explicitly increasing
# the regularization (making nu smaller) and by using trees with
# smaller depths.
# options.nu = 0.05
# options.tree_depth = 2
options.be_verbose = True

# dlib.train_shape_predictor() does the actual training.  It will save the
# final predictor to predictor.dat.  The input is an XML file that lists the
# images in the training dataset and also contains the positions of the face
# parts.
training_xml_path = os.path.join(image_folder,
                                 "4Dlib_training_images_with_landmarks.xml")
dlib.train_shape_predictor(training_xml_path, output_folder + "/predictor.dat",
                           options)

# Now that we have a model we can test it.  dlib.test_shape_predictor()
# measures the average distance between a face landmark output by the
# shape_predictor and where it should be according to the truth data.
print("\nTraining error: {}".format(
    dlib.test_shape_predictor(training_xml_path,
                              output_folder + "/predictor.dat")))
# The real test is to see how well it does on data it wasn't trained on.  We
# trained it on a very small dataset so the accuracy is not extremely high, but
# it's still doing quite good.  Moreover, if you train it on one of the large
# face landmarking datasets you will obtain state-of-the-art results, as shown
# in the Kazemi paper.
def measure_model_error(model, xml):
    '''requires: the model and xml path.
    It measures the error of the model on the given
    xml file of annotations.'''
    error = dlib.test_shape_predictor(xml, model)
    print("Error of the model: {} is {}".format(model, error))
options = dlib.shape_predictor_training_options()
options.cascade_depth = 10
options.num_trees_per_cascade_level = 500
options.tree_depth = 4
options.nu = 0.1
options.oversampling_amount = 20
options.feature_pool_size = 400
options.feature_pool_region_padding = 0
options.lambda_param = 0.1
options.num_test_splits = 20

options.be_verbose = True

trainingXMLPath = os.path.join(fldDataDir, "train_face_landmarks_70.xml")
testingXMLPath = os.path.join(fldDataDir, "test_face_landmarks_70.xml")

outputModelPath = os.path.join(fldDataDir, modelName)
print("entering")
if (os.path.exists(trainingXMLPath) and os.path.exists(testingXMLPath)):
    dlib.train_shape_predictor(trainingXMLPath, outputModelPath, options)

    print("\nTraining Accuracy: {}".format(
        dlib.test_shape_predictor(trainingXMLPath, outputModelPath)))
    print("\nTesting Accuracy: {}".format(
        dlib.test_shape_predictor(testingXMLPath, outputModelPath)))

else:
    print('training and test XML files not found.')
    print('Please check paths:')
    print('train: {}'.format(trainingXMLPath))
    print('test: {}'.format(testingXMLPath))
# -*- coding: utf-8 -*-

from glob import glob
import os

import dlib
import cv2


detector = dlib.simple_object_detector('../materials/recursos/clock_detector.svm')
detector_points = dlib.shape_predictor('../materials/recursos/detector_clock_points.dat')

print(dlib.test_shape_predictor('../materials/recursos/clock_points_test.xml', '../materials/recursos/detector_clock_points.dat'))

def print_points(image, points):
    for p in points.parts():
        cv2.circle(image, (p.x, p.y), 2, (255, 0, 0), 3)
        

for file in glob(os.path.join('../materials/relogios_teste', '*.jpg')):
    image = cv2.imread(file)
    detected_objects = detector(image, 2)
    
    for _object in detected_objects:
        l, t, r, b = int(_object.left()), int(_object.top()), int(_object.right()), int(_object.bottom())
        cv2.rectangle(image, (l, t), (r, b), (0, 0, 255), 2)
        points = detector_points(image, _object)
        print_points(image, points)
    
    cv2.imshow('Detector points', image)
    cv2.waitKey(0)

cv2.destroyAllWindows()
Example #10
import dlib

options = dlib.shape_predictor_training_options()
options.feature_pool_region_padding = 0.1
options.cascade_depth = 10
options.landmark_relative_padding_mode = False
options.nu = 0.05
options.tree_depth = 2
options.oversampling_translation_jitter = 0.1
options.oversampling_amount = 200
options.num_threads = 4
options.be_verbose = True

dlib.train_shape_predictor("face_landmarking.xml", "landmark_predictor.dat",
                           options)
print(
    "\nTraining error: ",
    dlib.test_shape_predictor("face_landmarking.xml",
                              "landmark_predictor.dat"))
# I'm also reducing the capacity of the model by explicitly increasing
# the regularization (making nu smaller) and by using trees with
# smaller depths.
options.nu = 0.05
options.tree_depth = 5  # dlib's default is 4
options.be_verbose = True
options.num_threads = 4  # number of CPU cores to use
# dlib.train_shape_predictor() does the actual training.  It will save the
# final predictor to predictor.dat.  The input is an XML file that lists the
# images in the training dataset and also contains the positions of the face
# parts.
training_xml_path = os.path.join(faces_folder, "labels_cfrs_train.xml")
dlib.train_shape_predictor(training_xml_path,
                           "cfrs_shape_predictor_face_landmarks.dat", options)

# Now that we have a model we can test it.  dlib.test_shape_predictor()
# measures the average distance between a face landmark output by the
# shape_predictor and where it should be according to the truth data.
print("\nTraining accuracy: {}".format(
    dlib.test_shape_predictor(training_xml_path,
                              "cfrs_shape_predictor_face_landmarks.dat")))
# The real test is to see how well it does on data it wasn't trained on.  We
# trained it on a very small dataset so the accuracy is not extremely high, but
# it's still doing quite good.  Moreover, if you train it on one of the large
# face landmarking datasets you will obtain state-of-the-art results, as shown
# in the Kazemi paper.
testing_xml_path = os.path.join(faces_folder, "labels_cfrs_test.xml")
print("Testing accuracy: {}".format(
    dlib.test_shape_predictor(testing_xml_path,
                              "cfrs_shape_predictor_face_landmarks.dat")))
Example #12
import os,sys,logging
import glob
import dlib
from skimage import io

# Set up debug logging
logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)

# make sure the image-folder path and the landmark predictor .dat were given as arguments
if len(sys.argv) != 3:
    logging.error("********** Please give a path to the face images directory and then the landmark predictor .dat file as arguments! **********")
    exit()

# load the image folder path into a variable
img_folder = sys.argv[1]

predictorPath = sys.argv[2]

xml_name = input("-----------xml file name (including .xml): ")

testing_xml_path = os.path.join(img_folder, xml_name)

print("\n Testing Accuracy: {}".format( dlib.test_shape_predictor(testing_xml_path, predictorPath)))

dlib.hit_enter_to_continue()
# USAGE
# python tune_predictor_hyperparams.py
# import the necessary packages
from pyimagesearch import config
from sklearn.model_selection import ParameterGrid
import multiprocessing
import numpy as np
import random
import time
import dlib
import cv2
import os
import cProfile
import re

dlib.test_shape_predictor(config.TEST_PATH, config.TEMP_MODEL_PATH)
def evaluate_model_acc(xmlPath, predPath):
    # compute and return the error (lower is better) of the shape
    # predictor over our testing path
    return dlib.test_shape_predictor(xmlPath, predPath)
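Hypothetical usage with the config paths referenced just above:

test_error = evaluate_model_acc(config.TEST_PATH, config.TEMP_MODEL_PATH)
print("[INFO] test error: {}".format(test_error))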
Example #15
# Now let's train the bounding box regression model using the dataset we just made.

import dlib

training_xml_file = "images/small_face_dataset/faces_600_bbr.xml"

options = dlib.shape_predictor_training_options()
options.num_threads = 4
options.be_verbose = True

# I'll explain how I selected these magic numbers in a few minutes.
options.cascade_depth = 7
options.tree_depth = 2
options.num_trees_per_cascade_level = 277
options.nu = 0.0326222
options.oversampling_amount = 20
options.oversampling_translation_jitter = 0.181914
options.feature_pool_size = 400
options.lambda_param = 0.14798
options.num_test_splits = 20
options.feature_pool_region_padding = 0.108275

dlib.train_shape_predictor(training_xml_file, "bbr_predictor.dat", options)
print("\nTraining error: ",
      dlib.test_shape_predictor(training_xml_file, "bbr_predictor.dat"))
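A sketch of applying the trained bounding-box regressor. It assumes the dataset was built so that the predictor's output parts trace the corrected box, which the snippet does not state; under that assumption the refined box is just the bounding rectangle of the predicted parts:

def refine_box(img, det, bbr):
    # 'det' is a dlib.rectangle from an upstream detector (assumed to exist).
    shape = bbr(img, det)
    xs = [p.x for p in shape.parts()]
    ys = [p.y for p in shape.parts()]
    return dlib.rectangle(min(xs), min(ys), max(xs), max(ys))

bbr_predictor = dlib.shape_predictor("bbr_predictor.dat")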
Example #16
options.oversampling_amount = 5
options.be_verbose = True

# dlib.train_shape_predictor() does the actual training.  It will save the
# final predictor to predictor.dat.  The input is an XML file that lists the
# images in the training dataset and also contains the positions of the face
# parts.
training_xml_path = os.path.join(ear_data, "ear_train.xml")
dlib.train_shape_predictor(training_xml_path, "ear_landmark_predictor.dat", options)

# Now that we have a model we can test it.  dlib.test_shape_predictor()
# measures the average distance between a face landmark output by the
# shape_predictor and where it should be according to the truth data.
print("\nTraining accuracy: {}".format(
    dlib.test_shape_predictor(training_xml_path, "ear_landmark_predictor.dat")))
# The real test is to see how well it does on data it wasn't trained on.  We
# trained it on a very small dataset so the accuracy is not extremely high, but
# it's still doing quite good.  Moreover, if you train it on one of the large
# face landmarking datasets you will obtain state-of-the-art results, as shown
# in the Kazemi paper.
testing_xml_path = os.path.join(ear_data, "ear_test.xml")
print("Testing accuracy: {}".format(
    dlib.test_shape_predictor(testing_xml_path, "ear_landmark_predictor.dat")))

# Now let's use it as you would in a normal application.  First we will load it
# from disk. We also need to load a face detector to provide the initial
# estimate of the facial location.
# predictor = dlib.shape_predictor("ear_landmark_predictor.dat")
# detector = dlib.get_frontal_face_detector()
#
import os

model_name = "shape_predictor_70_face_landmarks.dat"

options = dlib.shape_predictor_training_options()
options.cascade_depth = 10
options.num_trees_per_cascade_level = 500
options.tree_depth = 4
options.nu = 0.1
options.oversampling_amount = 20
options.feature_pool_size = 400
options.feature_pool_region_padding = 0
options.lambda_param = 0.1
options.num_test_splits = 20

# Tell the trainer to print status messages to the console so we can
# see training options and how long the training will take.
options.be_verbose = True

training_xml_path = "/home/jash/Desktop/JashWork/Advanced-Computer-Vision/data/models/facial_landmark_data/70_points/training_with_face_landmarks.xml"
testing_xml_path = "/home/jash/Desktop/JashWork/Advanced-Computer-Vision/data/models/facial_landmark_data/70_points/testing_with_face_landmarks.xml"
output_model_path = "/home/jash/Desktop/JashWork/Advanced-Computer-Vision/data/models/" + model_name

if os.path.exists(training_xml_path) and os.path.exists(testing_xml_path):
    dlib.train_shape_predictor(training_xml_path, output_model_path, options)

    print("Training error: {}".format(
        dlib.test_shape_predictor(training_xml_path, output_model_path)))
    print("Testing error: {}".format(
        dlib.test_shape_predictor(testing_xml_path, output_model_path)))
    def prepareModel(self):

        a = ModelPoints()
        a.instantiate()
        self.text.configure(state='normal')
        self.text.insert("insert",
                         "\n#########################################")
        self.text.insert("insert", "#########################################")
        self.text.insert(
            "insert", "\n\t\t\tModel initialized: " + str(date.today()) +
            " " + str(datetime.now().time()))
        self.text.insert("insert", "\n\n--> Nombre d'images : ")
        self.text.insert("insert",
                         str(len(os.listdir(InterfacePoint.imagefolder_path))))
        self.text.update()
        a.split()
        self.text.insert("insert", "\n\n--> Dossiers train / test crées : ")
        self.text.insert(
            "insert",
            str(len(os.listdir(InterfacePoint.modele_path + "train"))) +
            " images Train + ")
        self.text.insert(
            "insert",
            str(len(os.listdir(InterfacePoint.modele_path + "test"))) +
            " images Test")
        self.text.update()
        c = str(a.options())
        self.text.insert("insert", "\n\n--> Options set :")
        for x in c[c.find("(") + 1:c.find(")")].split(','):
            self.text.insert("insert", "\t" + x + "\n")
        message = "\n\n--> Fitting trees..."
        self.text.insert("insert", message)
        message = "\n\n\t\t\tPLEASE WAIT UNTIL FINISHED !"
        self.text.insert("insert", message)
        self.text.update()
        start = time.time()
        a.train()
        end = time.time()
        self.text.insert(
            "insert", "\n\n--> Time elapsed: " +
            str(timedelta(seconds=round(end - start))))
        self.text.update()
        self.text.insert(
            "insert", "\n--> Training finished\n--> Model updated: ")
        self.text.update()
        self.text.insert(
            "insert",
            str(
                round(
                    int(
                        os.path.getsize(InterfacePoint.modele_path +
                                        "predictor.dat")) / 1048576)) + " MB")
        self.text.insert(
            "insert", "\n--> Training error: {}".format(
                round(
                    dlib.test_shape_predictor(
                        InterfacePoint.trainfolder_path,
                        InterfacePoint.modele_path + "predictor.dat"), 2)))
        self.text.insert("insert", " pixels")
        self.text.insert(
            "insert", "\n\t\t\tModel finalized: " + str(date.today()) + " " +
            str(datetime.now().time()))
        self.text.insert("insert",
                         "\n#########################################")
        self.text.insert("insert", "#########################################")
        self.text.update()
        self.text.configure(state='disabled')
# smaller depths.
options.nu = 0.05
options.tree_depth = 2
options.be_verbose = True

# dlib.train_shape_predictor() does the actual training.  It will save the
# final predictor to predictor.dat.  The input is an XML file that lists the
# images in the training dataset and also contains the positions of the face
# parts.
training_xml_path = os.path.join(faces_folder, "training_with_face_landmarks.xml")
dlib.train_shape_predictor(training_xml_path, "predictor.dat", options)

# Now that we have a model we can test it.  dlib.test_shape_predictor()
# measures the average distance between a face landmark output by the
# shape_predictor and where it should be according to the truth data.
print("\nTraining accuracy: {}".format(dlib.test_shape_predictor(training_xml_path, "predictor.dat")))
# The real test is to see how well it does on data it wasn't trained on.  We
# trained it on a very small dataset so the accuracy is not extremely high, but
# it's still doing quite good.  Moreover, if you train it on one of the large
# face landmarking datasets you will obtain state-of-the-art results, as shown
# in the Kazemi paper.
testing_xml_path = os.path.join(faces_folder, "testing_with_face_landmarks.xml")
print("Testing accuracy: {}".format(dlib.test_shape_predictor(testing_xml_path, "predictor.dat")))

# Now let's use it as you would in a normal application.  First we will load it
# from disk. We also need to load a face detector to provide the initial
# estimate of the facial location.
predictor = dlib.shape_predictor("predictor.dat")
detector = dlib.get_frontal_face_detector()

# Now let's run the detector and shape_predictor over the images in the faces
Example #20
ap.add_argument("-n",
                "--num-trees",
                type=int,
                default=500,
                help="number of regression trees (default = 500)",
                metavar='')
args = vars(ap.parse_args())

#Setting up the training parameters
options = dlib.shape_predictor_training_options()
options.num_trees_per_cascade_level = args['num_trees']
options.nu = args['nu']
options.num_threads = args['threads']
options.tree_depth = args['tree_depth']
options.cascade_depth = args['cascade_depth']
options.feature_pool_size = args['feature_pool_size']
options.num_test_splits = args['test_splits']
options.oversampling_amount = args['oversampling']
options.be_verbose = True

#Training the model
train_path = os.path.join('./', args['dataset'])
dlib.train_shape_predictor(train_path, args['out'] + ".dat", options)
print("Training error (average pixel deviation): {}".format(
    dlib.test_shape_predictor(train_path, args['out'] + ".dat")))

#Testing the model (if test data was provided)
if args['test'] is not None:
    test_path = os.path.join('./', args['test'])
    print("Testing error (average pixel deviation): {}".format(
        dlib.test_shape_predictor(test_path, args['out'] + ".dat")))
def evaluate_model_acc(xml_path, pred_path):
    # Compute and return the error (lower is better) of the shape predictor over the testing path
    return dlib.test_shape_predictor(xml_path, pred_path)
Example #22
# images with boxes.  To see how to use it read the tools/imglab/README.txt
# file.  But for this example, we just use the training.xml file included with
# dlib.
if not os.path.exists(os.path.join(ip_folder,"detector.dat")):
    os.chdir(os.path.dirname(training_xml_path))

    dlib.train_shape_predictor("training.xml", os.path.join(ip_folder,"detector.dat"), options)



# Now that we have a shape predictor we can test it.  The first statement
# tests it on the training data and prints the average landmark error.
print("")  # Print blank line to create gap from previous output
os.chdir(os.path.dirname(training_xml_path))
train_test = dlib.test_shape_predictor("training.xml", os.path.join(ip_folder,"detector.dat").replace("\\","/"))
print("Training accuracy: {}".format(train_test))
# However, to get an idea if it really worked without overfitting we need to
# run it on images it wasn't trained on.  The next lines do this.
#print("Testing accuracy: {}".format(dlib.test_shape_predictor(testing_xml_path, os.path.join(current_dir,"detector.dat"))))
os.chdir(os.path.dirname(testing_xml_path))
true_test = dlib.test_shape_predictor("testing.xml", os.path.join(ip_folder,"detector.dat").replace("\\","/"))
print("Testing accuracy: {}".format(true_test))

os.chdir(current_dir)
# Now let's use the detector as you would in a normal application.  First we
# will load it from disk.
predictor = dlib.shape_predictor(os.path.join(ip_folder,"detector.dat").replace("\\","/"))
detector = dlib.simple_object_detector(os.path.join(ip_folder,"detector.svm").replace("\\","/"))
Example #23
import dlib
import cv2
import glob
import os

detectorClock = dlib.simple_object_detector("assets/detector_relogios.svm")
clockPointsDetector = dlib.shape_predictor("assets/detector_relogios_pontos.dat")

print(dlib.test_shape_predictor("assets/teste_relogios_pontos.xml", "assets/detector_relogios_pontos.dat"))

def imprimirPoints(image, points):
    for p in points.parts():
        cv2.circle(image, (p.x, p.y), 2, (0, 255, 0))

for arquive in glob.glob(os.path.join("relogios_teste", "*.jpg")):
    image = cv2.imread(arquive)
    objectDetected = detectorClock(image, 2)
    for clock in objectDetected:
        l, t, r, b = (int(clock.left()), int(clock.top()), int(clock.right()), int(clock.bottom()))
        points = clockPointsDetector(image, clock)
        imprimirPoints(image, points)
        cv2.rectangle(image, (l, t), (r, b), (0, 0, 255), 2)

    cv2.imshow("Points Detector :", image)
    cv2.waitKey(0)

cv2.destroyAllWindows()
Example #24
# USAGE
# python evaluate_shape_predictor.py --predictor eye_predictor.dat --xml ibug_300W_large_face_landmark_dataset/labels_ibug_300W_train_eyes.xml
# python evaluate_shape_predictor.py --predictor eye_predictor.dat --xml ibug_300W_large_face_landmark_dataset/labels_ibug_300W_test_eyes.xml

# import the necessary packages
import argparse
import dlib

# construct the argument parser and parse the arguments
'''
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--predictor", required=True,
	help="path to trained dlib shape predictor model")
ap.add_argument("-x", "--xml", required=True,
	help="path to input training/testing XML file")
args = vars(ap.parse_args())
'''
xml_path = "../cartoon-20201209T065805Z-001/cartoon/Cartoon_face_with_landmarks_test.xml"
model_path = "predictor_points.dat"
# compute the error over the supplied data split and display it to
# our screen
print("[INFO] evaluating shape predictor...")
error = dlib.test_shape_predictor(xml_path, model_path)
print("[INFO] error: {}".format(error))
Example #25
def measure_model_error(model, xml_annotations):
    """requires: the model and xml path.
    It measures the error of the model on the given
    xml file of annotations."""
    error = dlib.test_shape_predictor(xml_annotations, model)
    print('Error of the model: ' + str(model) + ' is ' + str(error))
Example #26
import multiprocessing
import os

import dlib

options = dlib.shape_predictor_training_options()
options.tree_depth = 3
options.nu = 0.3
options.cascade_depth = 30
options.feature_pool_size = 800
options.num_test_splits = 20
options.oversampling_amount = 1
options.oversampling_translation_jitter = 0.4
options.be_verbose = True
options.num_threads = multiprocessing.cpu_count()

train_xml_path = os.path.abspath("./youtube_faces_train.xml")
predictor_dat = "./youtube_faces_68_points.dat"
dlib.train_shape_predictor(train_xml_path, predictor_dat, options)

print("\nTraining MAE: {}".format(
    dlib.test_shape_predictor(train_xml_path, predictor_dat)))

test_xml_path = os.path.abspath("./youtube_faces_test.xml")
print("Testing MAE: {}".format(
    dlib.test_shape_predictor(test_xml_path, predictor_dat)))
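A short usage sketch for the resulting model; the sample frame and the drawing choices are assumptions, not part of the snippet:

import cv2

detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(predictor_dat)
frame = cv2.imread("frame.jpg")  # assumed sample image
for det in detector(frame, 1):
    shape = predictor(frame, det)
    for p in shape.parts():
        cv2.circle(frame, (p.x, p.y), 2, (0, 255, 0), -1)
cv2.imwrite("frame_landmarks.jpg", frame)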
Example #27
# USAGE
# python evaluate_shape_predictor.py --predictor eye_predictor.dat --xml ibug_300W_large_face_landmark_dataset/labels_ibug_300W_train_eyes.xml
# python evaluate_shape_predictor.py --predictor eye_predictor.dat --xml ibug_300W_large_face_landmark_dataset/labels_ibug_300W_test_eyes.xml

# import the necessary packages
import argparse
import dlib

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-p",
                "--predictor",
                required=True,
                help="path to trained dlib shape predictor model")
ap.add_argument("-x",
                "--xml",
                required=True,
                help="path to input training/testing XML file")
args = vars(ap.parse_args())

# compute the error over the supplied data split and display it to
# our screen
print("[INFO] evaluating shape predictor...")
error = dlib.test_shape_predictor(args["xml"], args["predictor"])
print("[INFO] error: {}".format(error))
def main():

    # construct the argument parser and parse the arguments
    ap = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    ap.add_argument("-p", "--path", required=True, help="path to dataset")
    ap.add_argument("-v", "--verbose", default=True, help="be_verbose flag")
    ap.add_argument("-cd",
                    "--cascade_depth",
                    type=int,
                    default=10,
                    help="cascade_depth value")
    ap.add_argument("-td",
                    "--tree_depth",
                    type=int,
                    default=4,
                    help="tree_depth value")
    ap.add_argument("-ntpcl",
                    "--num_tress_per_cascade_level",
                    type=int,
                    default=500,
                    help="num_tress_per_cascade_level value")
    ap.add_argument("-nu", "--nu", default=0.1, type=float, help="nu value")
    ap.add_argument("-oa",
                    "--oversampling_amount",
                    type=int,
                    default=20,
                    help="oversampling_amount value")
    ap.add_argument("-fps",
                    "--feature_pool_size",
                    type=int,
                    default=400,
                    help="feature_pool_size value")
    ap.add_argument("-l",
                    "--lambda_param",
                    default=0.1,
                    type=float,
                    help="lambda_param value")
    ap.add_argument("-nts",
                    "--num_test_splits",
                    type=int,
                    default=20,
                    help="num_test_splits value")
    ap.add_argument("-fprp",
                    "--feature_pool_region_padding",
                    default=0,
                    type=int,
                    help="feature_pool_region_padding value")
    ap.add_argument("-rs",
                    "--random_seed",
                    default="",
                    help="random_seed value")
    ap.add_argument("-nt",
                    "--num_threads",
                    default=0,
                    type=int,
                    help="num_threads value")
    ap.add_argument("-m", "--model_name", required=True, help="model_name")
    args = ap.parse_args()

    # In this example we are going to train a shape predictor on a face
    # landmark dataset.  This means you need to supply the path to the dataset
    # folder as a command line argument so we will know where it is.
    faces_folder = args.path

    options = dlib.shape_predictor_training_options()
    # Now make the object responsible for training the model.
    # This algorithm has a bunch of parameters you can mess with.  The
    # documentation for the shape_predictor_trainer explains all of them.
    # You should also read Kazemi's paper which explains all the parameters
    # in great detail.  However, here I'm just setting three of them
    # differently than their default values.  I'm doing this because we
    # have a very small dataset.  In particular, setting the oversampling
    # to a high amount (300) effectively boosts the training set size, so
    # that helps this example.
    # options.oversampling_amount = args.oversampling_amount
    # I'm also reducing the capacity of the model by explicitly increasing
    # the regularization (making nu smaller) and by using trees with
    # smaller depths.
    # options.nu = 0.05
    # options.lambda_param = 0.1
    # options.tree_depth = 5
    # options.be_verbose = True
    # options.num_threads = 0
    # model_name = "predictor_1.dat"
    options.be_verbose = args.verbose
    options.cascade_depth = args.cascade_depth
    options.tree_depth = args.tree_depth
    options.num_trees_per_cascade_level = args.num_trees_per_cascade_level
    options.nu = args.nu
    options.oversampling_amount = args.oversampling_amount
    options.feature_pool_size = args.feature_pool_size
    options.lambda_param = args.lambda_param
    options.num_test_splits = args.num_test_splits
    options.feature_pool_region_padding = args.feature_pool_region_padding
    options.random_seed = args.random_seed
    options.num_threads = args.num_threads
    model_name = args.model_name

    # dlib.train_shape_predictor() does the actual training.  It will save the
    # final predictor to predictor.dat.  The input is an XML file that lists the
    # images in the training dataset and also contains the positions of the face
    # parts.
    training_xml_path = os.path.join(faces_folder,
                                     "labels_ibug_300W_train.xml")
    dlib.train_shape_predictor(training_xml_path, model_name, options)

    # Now that we have a model we can test it.  dlib.test_shape_predictor()
    # measures the average distance between a face landmark output by the
    # shape_predictor and where it should be according to the truth data.
    print("\nTraining accuracy: {}".format(
        dlib.test_shape_predictor(training_xml_path, model_name)))
    # The real test is to see how well it does on data it wasn't trained on.  We
    # trained it on a very small dataset so the accuracy is not extremely high, but
    # it's still doing quite good.  Moreover, if you train it on one of the large
    # face landmarking datasets you will obtain state-of-the-art results, as shown
    # in the Kazemi paper.
    testing_xml_path = os.path.join(faces_folder, "labels_ibug_300W.xml")
    print("Testing accuracy: {}".format(
        dlib.test_shape_predictor(testing_xml_path, model_name)))

    # Now let's use it as you would in a normal application.  First we will load it
    # from disk. We also need to load a face detector to provide the initial
    # estimate of the facial location.
    predictor = dlib.shape_predictor(model_name)
    detector = dlib.get_frontal_face_detector()

    # Now let's run the detector and shape_predictor over the images in the faces
    # folder and display the results.
    print(
        "Showing detections and predictions on the images in the faces folder..."
    )
    win = dlib.image_window()
    for f in glob.glob(os.path.join(faces_folder, "*.jpg")):
        print("Processing file: {}".format(f))
        img = io.imread(f)

        win.clear_overlay()
        win.set_image(img)

        # Ask the detector to find the bounding boxes of each face. The 1 in the
        # second argument indicates that we should upsample the image 1 time. This
        # will make everything bigger and allow us to detect more faces.
        dets = detector(img, 1)
        print("Number of faces detected: {}".format(len(dets)))
        for k, d in enumerate(dets):
            print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
                k, d.left(), d.top(), d.right(), d.bottom()))
            # Get the landmarks/parts for the face in box d.
            shape = predictor(img, d)
            print("Part 0: {}, Part 1: {} ...".format(shape.part(0),
                                                      shape.part(1)))
            # Draw the face landmarks on the screen.
            win.add_overlay(shape)

        win.add_overlay(dets)
        dlib.hit_enter_to_continue()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2018/10/1 10:49
# @Author  : 周文帆小组
# @Site    :
# @File    : face_re.py
# @Software: PyCharm

import dlib, os

current_path = os.getcwd()
faces_path = current_path + '/faces/'

options = dlib.shape_predictor_training_options()
options.oversampling_amount = 300
options.nu = 0.05
options.tree_depth = 2
options.be_verbose = True

train_path = os.path.join(faces_path, 'training_with_face_landmarks.xml')
# Train the face landmark predictor with the labeled XML file in the faces folder
dlib.train_shape_predictor(train_path, 'predictor.dat', options)
# Print the predictor's accuracy on the training set
print('\nTraining accuracy:{}'.format(
    dlib.test_shape_predictor(train_path, 'predictor.dat')))
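The snippet stops at training accuracy. If the matching test XML from dlib's faces folder is present (an assumption here), the held-out check is one more call:

test_path = os.path.join(faces_path, 'testing_with_face_landmarks.xml')
print('Testing accuracy:{}'.format(
    dlib.test_shape_predictor(test_path, 'predictor.dat')))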
Example #30
options.nu = 0.05
options.tree_depth = 2
options.be_verbose = True

# dlib.train_shape_predictor() does the actual training.  It will save the
# final predictor to predictor.dat.  The input is an XML file that lists the
# images in the training dataset and also contains the positions of the face
# parts.
training_xml_path = os.path.join(faces_folder, "training_with_face_landmarks.xml")
dlib.train_shape_predictor(training_xml_path, "predictor.dat", options)

# Now that we have a model we can test it.  dlib.test_shape_predictor()
# measures the average distance between a face landmark output by the
# shape_predictor and where it should be according to the truth data.
print("\nTraining accuracy: {}".format(
    dlib.test_shape_predictor(training_xml_path, "predictor.dat")))
# The real test is to see how well it does on data it wasn't trained on.  We
# trained it on a very small dataset so the accuracy is not extremely high, but
# it's still doing quite good.  Moreover, if you train it on one of the large
# face landmarking datasets you will obtain state-of-the-art results, as shown
# in the Kazemi paper.
testing_xml_path = os.path.join(faces_folder, "testing_with_face_landmarks.xml")
print("Testing accuracy: {}".format(
    dlib.test_shape_predictor(testing_xml_path, "predictor.dat")))

# Now let's use it as you would in a normal application.  First we will load it
# from disk. We also need to load a face detector to provide the initial
# estimate of the facial location.
predictor = dlib.shape_predictor("predictor.dat")
detector = dlib.get_frontal_face_detector()
training_options.feature_pool_region_padding = 0

#training_options.num_threads = 2

# set be_verbose to True so training progress is printed to the console
training_options.be_verbose = True

# training takes an XML file listing the images in the training dataset,
# so build the path to that XML inside the given image folder
xml_name = input("-----------xml file name (including .xml): ")

training_xml_path = os.path.join(img_folder, xml_name)
logging.debug("-Set the img xml")

# now train the predictor with these settings and images
model_name = input("-----------model name (including .dat): ")

dlib.train_shape_predictor(training_xml_path, model_name, training_options)
logging.debug("-Trained the new predictor")

print("\n Training Accuracy: {}".format(
    dlib.test_shape_predictor(training_xml_path, model_name)))

#now we want to test the trained model on faces different from the ones used in training
#testing_xml_path = os.path.join(img_folder, "testing.xml")
#logging.debug("-Tested the new predictor")

#print("\n Testing Accuracy: {}".format( dlib.test_shape_predictor(testing_xml_path, "trained_landmark_predictor.dat")))

dlib.hit_enter_to_continue()