Example #1
    def train(self):
        print("start training:")
        # Note: the unicode paths must be converted to plain byte strings here,
        # otherwise dlib cannot read them; this encoding bug cost me two hours
        train_xml_path = str(self.train_xml_path)
        model_path = str(self.model_path)
        
        dlib.train_simple_object_detector(train_xml_path, model_path, self.options)
        
        print("Training accuracy: {}".format(dlib.test_simple_object_detector(train_xml_path, model_path)))
        print("The SVM model is saved in {0}".format(model_path))
        
        if self.log:
            self.log.append(u'--'*30)
            self.log.append("Training complete!")
            self.log.append("Training accuracy: {}".format(dlib.test_simple_object_detector(train_xml_path, model_path)))
            self.log.append("The SVM model is saved in {0}".format(model_path))
            self.log.append(u'--'*30)
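
Once train() has run, the saved .svm file can be loaded back and applied to new images, as several of the later examples do. A minimal sketch ("example.jpg" is a placeholder path):

import cv2
import dlib

detector = dlib.simple_object_detector("detector.svm")  # placeholder; use the model_path saved above
img = cv2.imread("example.jpg")
for box in detector(img):  # returns a list of dlib.rectangle
    print(box.left(), box.top(), box.right(), box.bottom())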
Example #2
def test_detector_accuracy(det_name):
    folder = "../data/TrainHOG/"
    coins_folder = folder
    training_xml_path = os.path.join(coins_folder, "traincoins.xml")
    testing_xml_path = os.path.join(coins_folder, "testcoins.xml")
    detective = str(det_name)
    print("")  # Print blank line to create gap from previous output
    print("Training accuracy: {}".format(
        dlib.test_simple_object_detector(training_xml_path, detective)))
    print("Testing accuracy: {}".format(
        dlib.test_simple_object_detector(testing_xml_path, detective)))
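
Note that dlib.test_simple_object_detector() returns a metrics object rather than a bare number; the prints above show all of its fields on one line. A minimal sketch of reading them individually (reusing the names from the function above; Example #7 later accesses result.average_precision the same way):

result = dlib.test_simple_object_detector(testing_xml_path, detective)
print("precision: {}".format(result.precision))
print("recall: {}".format(result.recall))
print("average precision: {}".format(result.average_precision))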
    def train_with_dlib(self, obj_folder, detector_path, train_file_name, test_file_name):

        # pdb.set_trace()  # debugging breakpoint, disabled
        logger = MyUtils.Tee("{0}/{1}.log".format(obj_folder, 'run'), 'w')
        logger.write('dlib training called')
        
        # Now let's do the training.  The train_simple_object_detector() function has a
        # bunch of options, all of which come with reasonable default values.  The next
        # few lines go over some of these options.
        options = dlib.simple_object_detector_training_options()

        # The trainer is a kind of support vector machine and therefore has the usual
        # SVM C parameter.  In general, a bigger C encourages it to fit the training
        # data better but might lead to overfitting.  You must find the best C value
        # empirically by checking how well the trained detector works on a test set of
        # images you haven't trained on.  Don't just leave the value set at 5.  Try a
        # few different C values and see what works best for your data.
        options.C = 5

        # Tell the code how many CPU cores your computer has for the fastest training.
        options.num_threads = 2
        options.be_verbose = True

        training_xml_path = os.path.join(obj_folder, train_file_name)
        testing_xml_path = os.path.join(obj_folder, test_file_name)
        # This function does the actual training.  It will save the final detector to
        # detector.svm.  The input is an XML file that lists the images in the training
        # dataset and also contains the positions of the face boxes.  To create your
        # own XML files you can use the imglab tool which can be found in the
        # tools/imglab folder.  It is a simple graphical tool for labeling objects in
        # images with boxes.  To see how to use it read the tools/imglab/README.txt
        # file.  But for this example, we just use the training.xml file included with
        # dlib.
        logger.write('start training. saved detector path: ' + detector_path)
        dlib.train_simple_object_detector(training_xml_path, detector_path, options)
        logger.write('end training')


        # Now that we have a face detector we can test it.  The first statement tests
        # it on the training data.  It will log the precision, recall, and then
        # average precision.
        logger.write("")  # Print blank line to create gap from previous output
        logger.write("Training accuracy: {}".format(
            dlib.test_simple_object_detector(training_xml_path, detector_path)))
        # However, to get an idea if it really worked without overfitting we need to
        # run it on images it wasn't trained on.  The next line does this.  Happily, we
        # see that the object detector works perfectly on the testing images.
        accuracy = dlib.test_simple_object_detector(testing_xml_path, detector_path)
        logger.write("Testing accuracy: {}".format(accuracy))
        logger.flush()
        return accuracy
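
The comment block above warns against leaving C at its default; a minimal sweep sketch following that advice (the XML paths are placeholders, and each iteration retrains from scratch):

import dlib

for c in [1, 5, 10, 50]:
    options = dlib.simple_object_detector_training_options()
    options.C = c
    dlib.train_simple_object_detector("training.xml", "detector_tmp.svm", options)
    result = dlib.test_simple_object_detector("testing.xml", "detector_tmp.svm")
    print("C={} -> average precision {}".format(c, result.average_precision))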
    def evaluate_dlib_detector(self):
        # Now that we have a face detector we can test it.  The first statement tests
        # it on the training data.  It will print the precision, recall, and then
        # average precision.
        print("")  # Print blank line to create gap from previous output
        print("MTB detector evaluation:")
        print("Training accuracy: {}".format(dlib.test_simple_object_detector("../data/annotations/bb/training/combined_mtb.xml", "../data/models/detector_mtb.svm")))
        print("Testing accuracy: {}".format(dlib.test_simple_object_detector("../data/annotations/bb/testing/combined_mtb.xml", "../data/models/detector_mtb.svm")))

        print("PED detector evaluation:")
        print("Training accuracy: {}".format(dlib.test_simple_object_detector("../data/annotations/bb/training/combined_ped.xml", "../data/models/detector_ped.svm")))
        print("Testing accuracy: {}".format(dlib.test_simple_object_detector("../data/annotations/bb/testing/combined_ped.xml", "../data/models/detector_ped.svm")))
Example #5
    def evaluate_dlib_detector(self):
        # Now that we have a face detector we can test it.  The first statement tests
        # it on the training data.  It will print the precision, recall, and then
        # average precision.
        print("")  # Print blank line to create gap from previous output
        print("MTB detector evaluation:")
        print("Training accuracy: {}".format(dlib.test_simple_object_detector("../data/training/training_mtb.xml", "../data/models/detector_mtb.svm")))
        # However, to get an idea if it really worked without overfitting we need to
        # run it on images it wasn't trained on.  The next line does this.  Happily, we
        # see that the object detector works perfectly on the testing images.
        print("Testing accuracy: {}".format(dlib.test_simple_object_detector("../data/testing/testing_mtb.xml", "../data/models/detector_mtb.svm")))

        print("PED detector evaluation:")
        print("Training accuracy: {}".format(dlib.test_simple_object_detector("../data/training/training_ped.xml", "../data/models/detector_ped.svm")))
        print("Testing accuracy: {}".format(dlib.test_simple_object_detector("../data/testing/testing_ped.xml", "../data/models/detector_ped.svm")))
Example #6
def create_svm(request,pk,template_name='create_svm.html'):
     brand = get_object_or_404(Brands, pk=pk)
     data = {}
     data[ 'pk' ] = pk
     if request.method == 'POST':
        svm = request.POST.get('svm')
        cval = request.POST.get('cval')
        paths = Paths.objects.filter( brand_id = pk )
        if paths.exists():
            for pt in paths:
                train_path = pt.train_path
                train_path = os.path.relpath(train_path,settings.MEDIA_ROOT)
                test_path = pt.test_path
                test_path = os.path.relpath(test_path,settings.MEDIA_ROOT)
                svm_path = pt.svm_path
        #cmd = "python 35Hawkdetect.py /var/sites/thirdauth/static/images/companies/Stovekraft\ Private\ Limited\(mohsin\)/idea/test/ cola123.svm jpg"
        print(svm_path)
        faces_folder = settings.MEDIA_ROOT+"/"+train_path+"/"
        test_folder = settings.MEDIA_ROOT+"/"+test_path+"/"
        print(faces_folder)
        #dest = "/var/sites/thirdauth/static/images/companies/Stovekraft Private Limited(mohsin)/Docomo/svm"
        timestr = svm+time.strftime("_%m_%d_%H_%M")+".svm"
        print(timestr)
        outputpath = str(svm)+".svm"
        options = dlib.simple_object_detector_training_options()
        options.add_left_right_image_flips = True
        options.C = int(cval)
        options.num_threads = 4
        options.be_verbose = True
        training_xml_path = os.path.join(str(faces_folder), "training.xml")
        testing_xml_path = os.path.join(str(test_folder), "training.xml")
        dlib.train_simple_object_detector(training_xml_path,outputpath, options)
        print("")
        print("Training accuracy: {}".format(
        dlib.test_simple_object_detector(training_xml_path, outputpath)))
        print("Testing accuracy: {}".format(
        dlib.test_simple_object_detector(testing_xml_path, outputpath)))
        result = "Training accuracy: {}"+format(dlib.test_simple_object_detector(training_xml_path, outputpath))+"  Testing accuracy: {}"+format(dlib.test_simple_object_detector(testing_xml_path, outputpath))
        os.rename(str(outputpath),timestr)
        if os.path.exists(str(svm_path)+"/"+str(timestr)):
             os.remove(str(svm_path)+"/"+str(timestr))
        shutil.move("/var/sites/thirdauth/"+str(timestr),str(svm_path))
        Svms(svm_name = str(timestr), brand_id = pk , company_id = brand.company_id ).save()
        return HttpResponse(result)
     else:
        return render(request,template_name, data ,context_instance=RequestContext(request))
Example #7
def test_params(C, nuclear_norm):
    options = dlib.simple_object_detector_training_options()
    # our dataset is oriented, so definitely don't add in flips.
    options.add_left_right_image_flips = False
    options.C = C
    options.num_threads = 1
    options.be_verbose = False
    options.nuclear_norm_regularization_strength = nuclear_norm
    options.max_runtime_seconds = 5  # SET REALLY SMALL SO THE DEMO DOESN'T TAKE TOO LONG, USE BIGGER VALUES FOR REAL USE!

    dlib.train_simple_object_detector(
        "images/small_face_dataset/cluster_001_train.xml", "detector1_.svm",
        options)

    # You can do a lot here.  Run the detector through
    # dlib.threshold_filter_singular_values() for instance to make sure it
    # learns something that will work once thresholded. We can also add a
    # penalty for having a lot of filters.   Run this program a few times and
    # try out different ways of penalizing the return from test_params() and
    # see what happens.
    result = dlib.test_simple_object_detector(
        "images/small_face_dataset/cluster_001_test.xml", "detector1_.svm")
    print("C = {}, nuclear_norm = {}".format(C, nuclear_norm))
    print("testing accuracy: ", result)
    sys.stdout.flush()
    # For settings with the same average precision, we should prefer smaller C
    # since smaller C has better generalization.
    return result.average_precision - C * 1e-8
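
test_params() returns a single score to maximize, which makes it a natural objective for dlib's global optimizer. A minimal sketch, assuming a dlib build that exposes find_max_global (the bounds and call budget are placeholders):

import dlib

# Search C in [1, 100] and nuclear_norm in [0, 10], maximizing the
# penalized score that test_params() returns.
best_xy, best_score = dlib.find_max_global(test_params,
                                           [1.0, 0.0],     # lower bounds
                                           [100.0, 10.0],  # upper bounds
                                           30)             # function calls
print("best C and nuclear_norm:", best_xy, "score:", best_score)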
Example #8
    def do_train(self, training_xml_path, testing_xml_path, name_svm,
                 hyperparameters):
        options = dlib.simple_object_detector_training_options()

        # hyperparameters is consumed positionally:
        # [detection_window_size, num_threads, C, epsilon]
        options.detection_window_size = int(hyperparameters[0])
        # options.add_left_right_image_flips = True
        options.C = int(hyperparameters[2])
        options.num_threads = int(hyperparameters[1])
        options.epsilon = hyperparameters[3]
        options.be_verbose = True
        dlib.train_simple_object_detector(training_xml_path, name_svm, options)
        print("")  # Print blank line to create gap from previous output
        print("Training accuracy: {}".format(
            dlib.test_simple_object_detector(training_xml_path, name_svm)))
        print("Testing accuracy: {}".format(
            dlib.test_simple_object_detector(testing_xml_path, name_svm)))
        self.detector = name_svm
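
The hyperparameters argument is read positionally above: index 0 is detection_window_size, 1 is num_threads, 2 is C, and 3 is epsilon. A hypothetical call (the instance name and values are placeholders):

trainer.do_train("training.xml", "testing.xml", "detector.svm",
                 [6400, 4, 5, 0.01])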
Example #9
def training_data(train_xml, test_xml, detectorName, Csvm=5):
    options = dlib.simple_object_detector_training_options()
    options.add_left_right_image_flips = True
    options.C = Csvm
    options.num_threads = 4
    options.be_verbose = True
    dlib.train_simple_object_detector(train_xml, detectorName + ".svm",
                                      options)
    print("Testing accuracy: {}".format(
        dlib.test_simple_object_detector(test_xml, detectorName + ".svm")))
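
A hypothetical usage of the function above; it writes cats.svm next to the script and scores it on the held-out XML (both file names are placeholders):

training_data("cats_train.xml", "cats_test.xml", "cats", Csvm=5)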
Example #10
def train():
    options = dlib.simple_object_detector_training_options()
    options.add_left_right_image_flips = True
    options.C = 5
    options.num_threads = 4
    options.be_verbose = True
    options.detection_window_size = 1024
    options.match_eps = 0.1

    training_xml_path = "../pics/train/training-single.xml"
    # training_xml_path = "/home/external/moderation-p**n-detector/oboobs.dlibxml"

    dlib.train_simple_object_detector(training_xml_path, "../boobs.svm", options)

    print("Training accuracy: {}".format(dlib.test_simple_object_detector(training_xml_path, "../boobs.svm")))
def train(training_xml_path, model_file="detector.svm"):

    assert os.path.isfile(training_xml_path)
    assert not os.path.isfile(model_file)

    # Now let's do the training.  The train_simple_object_detector() function has a
    # bunch of options, all of which come with reasonable default values.  The next
    # few lines go over some of these options.
    options = dlib.simple_object_detector_training_options()
    # Since faces are left/right symmetric we can tell the trainer to train a
    # symmetric detector.  This helps it get the most value out of the training
    # data.
    options.add_left_right_image_flips = True
    # The trainer is a kind of support vector machine and therefore has the usual
    # SVM C parameter.  In general, a bigger C encourages it to fit the training
    # data better but might lead to overfitting.  You must find the best C value
    # empirically by checking how well the trained detector works on a test set of
    # images you haven't trained on.  Don't just leave the value set at 5.  Try a
    # few different C values and see what works best for your data.
    options.C = 10
    # Tell the code how many CPU cores your computer has for the fastest training.
    options.num_threads = 6
    options.epsilon = 0.001
    options.be_verbose = True

    options.detection_window_size = 4096  # window area in pixels (~64x64)
    # options.upsample_limit = 8

    # This function does the actual training.  It will save the final detector to
    # detector.svm.  The input is an XML file that lists the images in the training
    # dataset and also contains the positions of the face boxes.  To create your
    # own XML files you can use the imglab tool which can be found in the
    # tools/imglab folder.  It is a simple graphical tool for labeling objects in
    # images with boxes.  To see how to use it read the tools/imglab/README.txt
    # file.  But for this example, we just use the training.xml file included with
    # dlib.

    print("Goingt to train ...")
    dlib.train_simple_object_detector(training_xml_path, model_file, options)

    # Now that we have a face detector we can test it.  The first statement tests
    # it on the training data.  It will print the precision, recall, and then
    # average precision.
    print("")  # Print blank line to create gap from previous output
    print("Training accuracy: {}".format(
        dlib.test_simple_object_detector(training_xml_path, model_file)))
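
As the inline comment notes, detection_window_size is a window area in pixels, so 4096 corresponds to roughly a 64x64 sliding window rather than 32x32. A sketch for deriving the value from a target box shape (the dimensions are placeholders):

# detection_window_size is an area in pixels, not a (width, height) pair.
target_width, target_height = 64, 64
options.detection_window_size = target_width * target_height  # 4096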
Example #12
def train():
    options = dlib.simple_object_detector_training_options()
    options.add_left_right_image_flips = True
    options.C = 5
    options.num_threads = 4
    options.be_verbose = True
    options.detection_window_size = 1024
    options.match_eps = 0.1

    training_xml_path = "../pics/train/training-single.xml"
    # training_xml_path = "/home/external/moderation-p**n-detector/oboobs.dlibxml"

    dlib.train_simple_object_detector(training_xml_path, "../boobs.svm",
                                      options)

    print("Training accuracy: {}".format(
        dlib.test_simple_object_detector(training_xml_path, "../boobs.svm")))
Example #13
def CREATE_SVM_DETECTOR():

    options = dlib.simple_object_detector_training_options()

    options.C = 5
    options.num_threads = 4
    options.be_verbose = True
    # options.add_left_right_image_flips = True

    fname_xml_train = 'data/xml/cat_BHS.xml'
    # fname_xml_test  = 'data/xml/testing.xml'

    dlib.train_simple_object_detector(fname_xml_train, "cat_detector.svm",
                                      options)

    print()
    print("Training accuracy: {}".format(
        dlib.test_simple_object_detector(fname_xml_train, "cat_detector.svm")))
Example #14
def train(path_xml):
    # container object for the train_simple_object_detector() options;
    # all options come with reasonable default values
    # http://dlib.net/python/index.html#dlib.simple_object_detector_training_options

    options = dlib.simple_object_detector_training_options()

    options.C = 6  # SVM C parameter; large values can lead to overfitting
    options.add_left_right_image_flips = True  # for symmetric objects such as faces
    options.be_verbose = True
    options.epsilon = 0.005  # stopping epsilon; smaller values -> more accurate training

    # the training XML was created using the tool https://imglab.ml

    dlib.train_simple_object_detector(path_xml, "detector.svm", options)

    print("\nTraining accuracy: ",
          dlib.test_simple_object_detector(path_xml, "detector.svm"))
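
imglab (including the browser-based https://imglab.ml referenced above) emits dlib's image-dataset XML. A minimal sketch of that layout, written out from Python so train() above has something to consume (the file name and box values are placeholders):

xml = """<?xml version='1.0' encoding='ISO-8859-1'?>
<dataset>
  <images>
    <image file='cards/img1.jpg'>
      <box top='30' left='40' width='80' height='120'/>
    </image>
  </images>
</dataset>
"""
with open("training.xml", "w") as f:
    f.write(xml)
# train("training.xml")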
    def step4(self, btn):
        #Based on dlib example:
        # Now let's do the training.  The train_simple_object_detector() function has a
        # bunch of options, all of which come with reasonable default values.  The next
        # few lines go over some of these options.

        options = dlib.simple_object_detector_training_options()

        # Since faces are left/right symmetric we can tell the trainer to train a
        # symmetric detector.  This helps it get the most value out of the training
        # data.

        options.add_left_right_image_flips = True

        # The trainer is a kind of support vector machine and therefore has the usual
        # SVM C parameter.  In general, a bigger C encourages it to fit the training
        # data better but might lead to overfitting.  You must find the best C value
        # empirically by checking how well the trained detector works on a test set of
        # images you haven't trained on.  Don't just leave the value set at 5.  Try a
        # few different C values and see what works best for your data.

        options.C = 5

        # Tell the code how many CPU cores your computer has for the fastest training.
        options.num_threads = 4
        options.be_verbose = True

        trainingXML = os.path.join(self.tmp, 'training.xml')
        #Ideally there would be half training, half testing:
        testingXML = os.path.join(self.tmp, "training.xml")

        # This function does the actual training.  It will save the final detector to
        # detector.svm.  The input is an XML file that lists the images in the training
        # dataset and also contains the positions of the face boxes.  To create your
        # own XML files you can use the imglab tool which can be found in the
        # tools/imglab folder.  It is a simple graphical tool for labeling objects in
        # images with boxes.
        dlib.train_simple_object_detector(
            trainingXML, os.path.join(self.tmp, "detector.svm"), options)
        print("Training accuracy: {}".format(
            dlib.test_simple_object_detector(
                trainingXML, os.path.join(self.tmp, "detector.svm"))))
Example #16
def simple_training(trainpath="Training/", nbthreads=4, cvalue=5):
    train_folder = trainpath
    #To train our dataset we call train_simple_object_detector() with all of its default values (which are reasonably good)
    options = dlib.simple_object_detector_training_options()
    #Cards are somewhat symmetric, so we improve the result by changing add_left_right_image_flips from its default to True
    options.add_left_right_image_flips = True
    #The C value encourages a better fit to the data, but a very large C encourages overfitting; the value 5 is still experimental and needs more testing
    options.C = cvalue
    # How many threads are available to train in parallel?
    options.num_threads = nbthreads
    #Let's follow the process
    options.be_verbose = True
    #Join the training directory and its corresponding .xml into one string
    training_xml_path = os.path.join(train_folder, "training.xml")
    #testing_xml_path = os.path.join(train_folder, "testing.xml")
    #Finally we call the function that actually does the training. It saves the result to a .svm detector
    # built from our .xml (the supervised training data)
    dlib.train_simple_object_detector(training_xml_path, "detector.svm",
                                      options)
    print("Training accuracy: {}".format(
        dlib.test_simple_object_detector(training_xml_path, "detector.svm")))
    print("Process done successfully")
 def fit(self, imagePaths, annotations, trainAnnot, trainImages, visualize=False, savePath=None):
     st = time.time()
     print(st)
     annotations = self._prepare_annotations(annotations)
     images = self._prepare_images(imagePaths)
     ###############################################
     ############### Training
     self._detector = dlib.train_simple_object_detector(images, annotations, self.options)
     print('Training complete. Time: {:.2f} seconds'.format(time.time() - st))
     #visualize HOG
     if visualize:
         win = dlib.image_window()
         win.set_image(self._detector)
         dlib.hit_enter_to_continue()
     #Save the detector and evaluate its performance using the test images
     if savePath is not None:
         self._detector.save(savePath)
         trainAnnot = self._prepare_annotations(trainAnnot)
         trainImages = self._prepare_images(trainImages)
         print("Metricas de entrenamiento : {}".format(dlib.test_simple_object_detector(trainImages,trainAnnot,self._detector)) )
     
     return self
Example #18
def training_cat():
    import os
    import sys
    import glob
    import dlib
    import cv2

    # options configures the training parameters and mode
    options = dlib.simple_object_detector_training_options()
    # Since faces are left/right symmetric we can tell the trainer to train a
    # symmetric detector.  This helps it get the most value out of the training
    # data.
    options.add_left_right_image_flips = True
    # SVM C parameter, usually left at the default of 5; tune it to get the best results
    options.C = 5
    # number of threads; if your machine has 4 cores, use 4
    options.num_threads = 4
    options.be_verbose = True

    # resolve paths
    current_path = os.getcwd()
    train_xml_path = r'E:\bigdata\ai\cat\cat.xml'
    print("training file path:" + train_xml_path)
    # print(train_xml_path)

    # start training
    print("start training:")
    dlib.train_simple_object_detector(train_xml_path,
                                      r'E:\bigdata\ai\cat\detector.svm',
                                      options)

    print("---------")
    # evaluate the trained model
    print("Training accuracy: {}".format(
        dlib.test_simple_object_detector(train_xml_path,
                                         r'E:\bigdata\ai\cat\detector.svm')))
def train_detector(train_data, filename='detector.svm'):
    '''Trains an object detector (HOG + SVM) and saves the model'''

    # Separate the images and bounding boxes in different lists.
    images = [val[0] for val in train_data.values()]
    bounding_boxes = [val[1] for val in train_data.values()]

    # Initialize object detector Options
    options = dlib.simple_object_detector_training_options()
    options.add_left_right_image_flips = False
    options.C = 5

    # Train the model
    detector = dlib.train_simple_object_detector(images, bounding_boxes,
                                                 options)

    # Check results
    results = dlib.test_simple_object_detector(images, bounding_boxes,
                                               detector)
    print(f'Training Results: {results}')

    # Save model
    detector.save(filename)
    print(f'Saved the model to {filename}')
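
train_detector() above expects train_data to map an id to an (image, boxes) tuple, as the val[0]/val[1] unpacking shows. A hypothetical usage sketch (the image path is a placeholder; in practice you would pass many such entries, since one image is rarely enough to train on):

import cv2
import dlib

img = cv2.imread('hand_0.png')  # placeholder path
train_data = {0: (img, [dlib.rectangle(left=10, top=10, right=110, bottom=110)])}
train_detector(train_data, filename='hand.svm')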
Example #20
import argparse

import dlib

# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-x", "--xml", required=True, help="path to input XML file")
ap.add_argument("-d",
                "--detector",
                required=True,
                help="Path to the object detector")
args = vars(ap.parse_args())

# grab the default training options for the HOG + Linear SVM detector, then
# train the detector -- in practice, the `C` parameter should be cross-validated
print("[INFO] training detector...")
options = dlib.simple_object_detector_training_options()
options.C = 1.0
options.num_threads = 4
options.be_verbose = True
options.upsample_limit = 1
dlib.train_simple_object_detector(args["xml"], args["detector"], options)

# show the training accuracy
print("[INFO] training accuracy: {}".format(
    dlib.test_simple_object_detector(args["xml"], args["detector"])))

# load the detector and visualize the HOG filter
detector = dlib.simple_object_detector(args["detector"])
win = dlib.image_window()
win.set_image(detector)
dlib.hit_enter_to_continue()
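
With the arguments defined above, this script would be invoked from the command line along the lines of `python train_detector.py --xml training.xml --detector detector.svm`, where the script name is hypothetical and the two paths point at an imglab-style XML file and the output location for the trained model.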
Example #21
# data.
options.add_left_right_image_flips = True
# The trainer is a kind of support vector machine and therefore has the usual
# SVM C parameter.  In general, a bigger C encourages it to fit the training
# data better but might lead to overfitting.  You must find the best C value
# empirically by checking how well the trained detector works on a test set of
# images you haven't trained on.  Don't just leave the value set at 5.  Try a
# few different C values and see what works best for your data.
options.C = 5
# Tell the code how many CPU cores your computer has for the fastest training.
options.num_threads = 4
options.be_verbose = True

training_xml_path = os.path.join(faces_folder, xml_file)
# This function does the actual training.  It will save the final detector to
# detector.svm.  The input is an XML file that lists the images in the training
# dataset and also contains the positions of the face boxes.  To create your
# own XML files you can use the imglab tool which can be found in the
# tools/imglab folder.  It is a simple graphical tool for labeling objects in
# images with boxes.  To see how to use it read the tools/imglab/README.txt
# file.  But for this example, we just use the training.xml file included with
# dlib.
dlib.train_simple_object_detector(training_xml_path, "detector.svm", options)

# Now that we have a face detector we can test it.  The first statement tests
# it on the training data.  It will print the precision, recall, and then
# average precision.
print("")  # Print blank line to create gap from previous output
print("Training accuracy: {}".format(
    dlib.test_simple_object_detector(training_xml_path, "detector.svm")))
if not os.path.exists(os.path.join(ip_folder, "detector.svm")):
    os.chdir(os.path.dirname(training_xml_path))

    dlib.train_simple_object_detector("training.xml",
                                      os.path.join(ip_folder, "detector.svm"),
                                      options)

# Now that we have a face detector we can test it.  The first statement tests
# it on the training data.  It will print the precision, recall, and then
# average precision.
print("")  # Print blank line to create gap from previous output
os.chdir(os.path.dirname(training_xml_path))
train_test = dlib.test_simple_object_detector(
    "training.xml",
    os.path.join(ip_folder, "detector.svm").replace("\\", "/"))
print("Training accuracy: {}".format(train_test))
# However, to get an idea if it really worked without overfitting we need to
# run it on images it wasn't trained on.  The next line does this.  Happily, we
# see that the object detector works perfectly on the testing images.
#print("Testing accuracy: {}".format(dlib.test_simple_object_detector(testing_xml_path, os.path.join(current_dir,"detector.svm")))
os.chdir(os.path.dirname(testing_xml_path))
true_test = dlib.test_simple_object_detector(
    "testing.xml",
    os.path.join(ip_folder, "detector.svm").replace("\\", "/"))
print("Testing accuracy: {}".format(true_test))

os.chdir(current_dir)
# Now let's use the detector as you would in a normal application.  First we
# will load it from disk.
Example #23
# dataset and also contains the positions of the face boxes.  To create your
# own XML files you can use the imglab tool which can be found in the
# tools/imglab folder.  It is a simple graphical tool for labeling objects in
# images with boxes.  To see how to use it read the tools/imglab/README.txt
# file.  But for this example, we just use the training.xml file included with
# dlib.
dlib.train_simple_object_detector(training_xml_path, "dog_detector.svm", options)



# Now that we have a face detector we can test it.  The first statement tests
# it on the training data.  It will print the precision, recall, and then
# average precision.
print("")  # Print blank line to create gap from previous output
print("Training accuracy: {}".format(
    dlib.test_simple_object_detector(training_xml_path, "dog_detector.svm")))
# However, to get an idea if it really worked without overfitting we need to
# run it on images it wasn't trained on.  The next line does this.  Happily, we
# see that the object detector works perfectly on the testing images.
print("Testing accuracy: {}".format(
    dlib.test_simple_object_detector(testing_xml_path, "dog_detector.svm")))





# Now let's use the detector as you would in a normal application.  First we
# will load it from disk.
detector = dlib.simple_object_detector("dog_detector.svm")

# We can look at the HOG filter we learned.  It should look like a face.  Neat!
Example #24
# dataset and also contains the positions of the face boxes.  To create your
# own XML files you can use the imglab tool which can be found in the
# tools/imglab folder.  It is a simple graphical tool for labeling objects in
# images with boxes.  To see how to use it read the tools/imglab/README.txt
# file.  But for this example, we just use the training.xml file included with
# dlib.
dlib.train_simple_object_detector(training_xml_path,
                                  "../biblioteca/detector_focinhos.svm",
                                  options)

# Now that we have a face detector we can test it.  The first statement tests
# it on the training data.  It will print the precision, recall, and then
# average precision.
print("")  # Print blank line to create gap from previous output
print("Training accuracy: {}".format(
    dlib.test_simple_object_detector(training_xml_path,
                                     "../biblioteca/detector_focinhos.svm")))
# However, to get an idea if it really worked without overfitting we need to
# run it on images it wasn't trained on.  The next line does this.  Happily, we
# see that the object detector works perfectly on the testing images.
print("Testing accuracy: {}".format(
    dlib.test_simple_object_detector(testing_xml_path,
                                     "../biblioteca/detector_focinhos.svm")))

# Now let's use the detector as you would in a normal application.  First we
# will load it from disk.
detector = dlib.simple_object_detector("../biblioteca/detector_focinhos.svm")

# We can look at the HOG filter we learned.  It should look like a face.  Neat!
win_det = dlib.image_window()
win_det.set_image(detector)
Example #25
import cv2
import dlib
import os


learning_folder = 'learning'

options = dlib.simple_object_detector_training_options()
options.add_left_right_image_flips = True
options.C = 5
options.num_threads = 1
options.be_verbose = True

training_xml_path = os.path.join(learning_folder,'info.xml')

dlib.train_simple_object_detector(training_xml_path, 'info.svm', options)

print ("")
print ("Training accuracy: {}".format(
    dlib.test_simple_object_detector(training_xml_path, "info.svm")
))

detector = dlib.simple_object_detector("info.svm")

win_det = dlib.image_window()
win_det.set_image(detector)

dlib.hit_enter_to_continue()
# This function does the actual training.  It will save the final detector to
# detector.svm.  The input is an XML file that lists the images in the training
# dataset and also contains the positions of the face boxes.  To create your
# own XML files you can use the imglab tool which can be found in the
# tools/imglab folder.  It is a simple graphical tool for labeling objects in
# images with boxes.  To see how to use it read the tools/imglab/README.txt
# file.  But for this example, we just use the training.xml file included with
# dlib.
dlib.train_simple_object_detector(faces_folder + "/training.xml",
                                  "detector.svm", options)

# Now that we have a face detector we can test it.  The first statement tests
# it on the training data.  It will print the precision, recall, and then
# average precision.
print "\ntraining accuracy:", dlib.test_simple_object_detector(
    faces_folder + "/training.xml", "detector.svm")
# However, to get an idea if it really worked without overfitting we need to
# run it on images it wasn't trained on.  The next line does this.  Happily, we
# see that the object detector works perfectly on the testing images.
print "testing accuracy: ", dlib.test_simple_object_detector(
    faces_folder + "/testing.xml", "detector.svm")

# Now let's use the detector as you would in a normal application.  First we
# will load it from disk.
detector = dlib.simple_object_detector("detector.svm")

# We can look at the HOG filter we learned.  It should look like a face.  Neat!
win_det = dlib.image_window()
win_det.set_image(detector)

# Now let's run the detector over the images in the faces folder and display the
Example #27
test_images = []
for train_image_file in train_image_files:
    train_images.append(io.imread(image_floder + train_image_file))
for test_image_file in test_image_files:
    test_images.append(io.imread(image_floder + test_image_file))

train_boxes = [([dlib.rectangle(left=371, top=455, right=723, bottom=943)]),
               ([dlib.rectangle(left=357, top=471, right=357+369, bottom=471+555)]),
               ([dlib.rectangle(left=413, top=313, right=413+319, bottom=313+451)]),
               ([dlib.rectangle(left=411, top=399, right=411+351, bottom=399+507)]),
               ([dlib.rectangle(left=287, top=439, right=287+423, bottom=439+537)]),
               ([dlib.rectangle(left=353, top=427, right=353+307, bottom=427+477)]),
               ([dlib.rectangle(left=369, top=569, right=369+385, bottom=569+601)]),
               ([dlib.rectangle(left=401, top=449, right=401+301, bottom=449+451)]),
               ([dlib.rectangle(left=265, top=577, right=265+417, bottom=577+597)]),
               ([dlib.rectangle(left=335, top=311, right=335+397, bottom=311+617)]),
               ([dlib.rectangle(left=341, top=381, right=341+447, bottom=381+645)]),
               ([dlib.rectangle(left=217, top=533, right=217+449, bottom=533+653)])]
test_boxes = [([dlib.rectangle(left=367, top=427, right=367+305, bottom=427+459)]),
              ([dlib.rectangle(left=481, top=309, right=481+473, bottom=309+709)]),
              ([dlib.rectangle(left=261, top=393, right=261+537, bottom=393+777)])]


detector = dlib.train_simple_object_detector(train_images, train_boxes, options)
# save the model
detector.save('stools.model')

print("\nTraining accuracy: {}".format(
    dlib.test_simple_object_detector(train_images, train_boxes, detector)))
print(dlib.test_simple_object_detector(test_images, test_boxes, detector))
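
The box lists above repeat the left+width / top+height arithmetic inline; a small helper makes the conversion explicit (a sketch, not part of the original snippet):

def box_from_xywh(x, y, w, h):
    # dlib.rectangle wants absolute edges, so convert (x, y, w, h) once here.
    return dlib.rectangle(left=x, top=y, right=x + w, bottom=y + h)

# e.g. the second training box above: box_from_xywh(357, 471, 369, 555)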
# This function does the actual training.  It will save the final detector to
# detector.svm.  The input is an XML file that lists the images in the training
# dataset and also contains the positions of the face boxes.  To create your
# own XML files you can use the imglab tool which can be found in the
# tools/imglab folder.  It is a simple graphical tool for labeling objects in
# images with boxes.  To see how to use it read the tools/imglab/README.txt
# file.  But for this example, we just use the training.xml file included with
# dlib.
dlib.train_simple_object_detector(faces_folder+"/training.xml","detector.svm", options)



# Now that we have a face detector we can test it.  The first statement tests
# it on the training data.  It will print the precision, recall, and then
# average precision.
print "\ntraining accuracy:", dlib.test_simple_object_detector(faces_folder+"/training.xml", "detector.svm")
# However, to get an idea if it really worked without overfitting we need to
# run it on images it wasn't trained on.  The next line does this.  Happily, we
# see that the object detector works perfectly on the testing images.
print "testing accuracy: ", dlib.test_simple_object_detector(faces_folder+"/testing.xml", "detector.svm")



# Now let's use the detector as you would in a normal application.  First we
# will load it from disk.
detector = dlib.simple_object_detector("detector.svm")

# We can look at the HOG filter we learned.  It should look like a face.  Neat!
win_det = dlib.image_window()
win_det.set_image(detector)
Example #29
import os
import dlib
import cv2
import glob

print(
    dlib.test_simple_object_detector("recursos/teste_relogios.xml",
                                     "recursos/detector_relogios.svm"))

detectorRelogio = dlib.simple_object_detector("recursos/detector_relogios.svm")
for imagem in glob.glob(os.path.join("relogios_teste", "*.jpg")):
    img = cv2.imread(imagem)
    objetosDetectados = detectorRelogio(img, 2)
    for d in objetosDetectados:
        e, t, r, b = (int(d.left()), int(d.top()), int(d.right()),
                      int(d.bottom()))
        cv2.rectangle(img, (e, t), (r, b), (0, 0, 255), 2)

    cv2.imshow("Detector de relogios", img)
    cv2.waitKey(0)

cv2.destroyAllWindows()
    def train(train, model, test, flips, C, threads, verbose):

        # Now let's do the training.  The train_simple_object_detector() function has a
        # bunch of options, all of which come with reasonable default values.  The next
        # few lines go over some of these options.
        options = dlib.simple_object_detector_training_options()

        # Flips are passed in as a parameter: only enable them when the target
        # object is left/right symmetric.
        options.add_left_right_image_flips = flips

        # The trainer is a kind of support vector machine and therefore has the usual
        # SVM C parameter.  In general, a bigger C encourages it to fit the training
        # data better but might lead to overfitting.  You must find the best C value
        # empirically by checking how well the trained detector works on a test set of
        # images you haven't trained on.  Don't just leave the value set at 5.  Try a
        # few different C values and see what works best for your data.
        options.C = C
        # Tell the code how many CPU cores your computer has for the fastest training.
        options.num_threads = threads
        options.be_verbose = verbose

        # You just need to put your images into a list. TrainingImages takes a folder
        #and extracts the images and bounding boxes for each image
        trainingSet = TrainingImages(train)
        images = trainingSet.images

        # Then for each image you make a list of rectangles which give the pixel
        # locations of the edges of the boxes.
        # And then you aggregate those lists of boxes into one big list and then call
        # train_simple_object_detector().
        boxes = trainingSet.boxes

        testImages = images
        testBoxes = boxes
        if test != train:
            testSet = TrainingImages(test)
            testImages = testSet.images
            testBoxes = testSet.boxes

        count = 1
        width = 0
        height = 0

        ##Calculating boxes, aspect ratios etc.  ***May need to adjust logic
        ##to accommodate ambiguity.
        ##Also saving new masked images to disk to verify the correct
        ##annotations are being detected.
        if not os.path.exists(train + "/masked"):
            os.makedirs(train + "/masked")
        aspRatios = []
        flatARs = []
        dictARs = {}
        for j, i in enumerate(boxes):
            curImageName = trainingSet.imageNames[j]
            print "Image: ", curImageName
            newImage = images[j].copy()
            aRs = []
            for box in i:
                cv2.rectangle(newImage, (box.left(), box.top()),
                              (box.right(), box.bottom()), (255, 255, 255),
                              thickness=-3)
                width += box.width()
                height += box.height()
                ar = float(box.width()) / float(box.height())
                aRs.append(ar)
                dictARs[ar] = box
                flatARs.append(ar)
                count += 1
                print "Box:   ", box, "\t Area:  ", box.area(), "\tAR:  ", aRs

            aspRatios.append(aRs)
            print "\nAspect Ratios:  ", aspRatios, "\nDictionary: ", dictARs
            baseName = curImageName.split("/")[-1]
            newImageName = train + "/masked/" + (baseName).replace(
                ".jpg", "-boxes.jpg")
            print "\nSaving:  ", newImageName, "\n"
            cv2.imwrite(newImageName, newImage)

        ##Calculating the mean and standard deviation (the mean may not be
        ##needed; it is not currently used...)
        aRMean = np.mean(flatARs, 0)
        aRStd = np.std(flatARs, 0)

        print "Aspect Ratio Mean:  ", aRMean, "  Std:  ", aRStd

        target_size = float(width / count) * float(height / count)
        #Update the sliding window size based on input data
        width, height = PlateDetector.bestWindow(boxes,
                                                 target_size=target_size)
        targetSize = int(width * height)
        targetAr = float(width) / height
        options.detection_window_size = targetSize
        print "New Width: ", width, "\tNew Height", height, "!!!"
        print "Target size:  ", targetSize, "  Target AR:  ", targetAr

        ##Deleting boxes with aspect ratios that are above or below the target
        ##aspect ratio plus one standard deviation.  bestWindow estimates a target
        ##aspect ratio close to (but not exactly) the mean.  This logic was borrowed
        ##from a dlib C++ HOG training example.  Not sure why they didn't just use the
        ##mean, but this seems to work fine.
        for i, imgArs in enumerate(aspRatios):
            for boxArs in imgArs:
                if (boxArs > (targetAr + aRStd)) or (boxArs <
                                                     (targetAr - aRStd)):
                    print "Deleting box ", dictARs[boxArs]
                    boxes[i].remove(dictARs[boxArs])
                    print "New boxes:  ", boxes

        #Train
        detector = dlib.train_simple_object_detector(images, boxes, options)
        # Save the trained detector to disk.
        detector.save(model)

        # Now let's look at its HOG filter!
        # win_det.set_image(detector)
        # dlib.hit_enter_to_continue()
        win_det = dlib.image_window()
        win_det.set_image(detector)

        # Note that you don't have to use the XML based input to
        # test_simple_object_detector().  If you have already loaded your training
        # images and bounding boxes for the objects then you can call it as shown
        # below.
        print("\nTraining accuracy: {}".format(
            dlib.test_simple_object_detector(testImages, testBoxes, detector)))
# This function does the actual training.  It will save the final detector to
# detector.svm.  The input is an XML file that lists the images in the training
# dataset and also contains the positions of the face boxes.  To create your
# own XML files you can use the imglab tool which can be found in the
# tools/imglab folder.  It is a simple graphical tool for labeling objects in
# images with boxes.  To see how to use it read the tools/imglab/README.txt
# file.  But for this example, we just use the training.xml file included with
# dlib.
dlib.train_simple_object_detector(training_xml_path, "detector.svm", options)

# Now that we have a face detector we can test it.  The first statement tests
# it on the training data.  It will print the precision, recall, and then
# average precision.
print("")  # Print blank line to create gap from previous output
print("Training accuracy: {}".format(
    dlib.test_simple_object_detector(training_xml_path, "detector.svm")))
# However, to get an idea if it really worked without overfitting we need to
# run it on images it wasn't trained on.  The next line does this.  Happily, we
# see that the object detector works perfectly on the testing images.
print("Testing accuracy: {}".format(
    dlib.test_simple_object_detector(testing_xml_path, "detector.svm")))





# Now let's use the detector as you would in a normal application.  First we
# will load it from disk.
detector = dlib.simple_object_detector("detector.svm")

# We can look at the HOG filter we learned.  It should look like a face.  Neat!
# dataset and also contains the positions of the face boxes.  To create your
# own XML files you can use the imglab tool which can be found in the
# tools/imglab folder.  It is a simple graphical tool for labeling objects in
# images with boxes.  To see how to use it read the tools/imglab/README.txt
# file.  But for this example, we just use the training.xml file included with
# dlib.
print "start training"
dlib.train_simple_object_detector(training_xml_path, "detector.svm", options)
print "end training"


# Now that we have a face detector we can test it.  The first statement tests
# it on the training data.  It will print the precision, recall, and then
# average precision.
print ("")  # Print blank line to create gap from previous output
print ("Training accuracy: {}".format(dlib.test_simple_object_detector(training_xml_path, "detector.svm")))
# However, to get an idea if it really worked without overfitting we need to
# run it on images it wasn't trained on.  The next line does this.  Happily, we
# see that the object detector works perfectly on the testing images.
print ("Testing accuracy: {}".format(dlib.test_simple_object_detector(testing_xml_path, "detector.svm")))


# Now let's use the detector as you would in a normal application.  First we
# will load it from disk.
detector = dlib.simple_object_detector("detector.svm")

# We can look at the HOG filter we learned.  It should look like a face.  Neat!
win_det = dlib.image_window()
win_det.set_image(detector)

# Now let's run the detector over the images in the faces folder and display the
# -*- coding: utf-8 -*-

import os
from glob import glob

import cv2
import dlib

print(
    dlib.test_simple_object_detector(
        '../materials/recursos/test_clock.xml',
        '../materials/recursos/clock_detector.svm'))

clock_detector = dlib.simple_object_detector(
    '../materials/recursos/clock_detector.svm')
for image in glob(os.path.join('../materials/relogios_teste', '*.jpg')):
    img = cv2.imread(image)
    detected_objects = clock_detector(img, 2)

    for _object in detected_objects:
        l, t, r, b = int(_object.left()), int(_object.top()), int(
            _object.right()), int(_object.bottom())
        cv2.rectangle(img, (l, t), (r, b), (0, 255, 255), 2)

    cv2.imshow('Detected clocks', img)
    cv2.waitKey(0)

cv2.destroyAllWindows()
Example #34
"""
This file demos the training of an SVM for object detection.
You should already have a labeled training set in the form of an XML file.
Use imglab to create one if you have not.
"""

import dlib, os

options = dlib.simple_object_detector_training_options()
# Enable this option if your object is symmetrical.
# options.add_left_right_image_flips = True
options.C = 3  # Specify penalty parameter, too large - overfit, too small - underfit
options.num_threads = 4
options.be_verbose = True

training_xml_path = os.path.abspath('training.xml')

detector_svm_name = "detector.svm"

# Train SVM
dlib.train_simple_object_detector(training_xml_path, detector_svm_name,
                                  options)

# Validate SVM
# A different image set should be used for testing; here the training set is reused
accuracy_result = dlib.test_simple_object_detector(training_xml_path,
                                                   detector_svm_name)
print("Training accuracy: {}".format(accuracy_result))
Example #35
def main():
    #If cleanup is True, previously collected images and annotations are wiped;
    #otherwise the new ones are appended to the previous ones
    cleanup = True

    cv2.namedWindow('frame', cv2.WINDOW_NORMAL)
    cv2.resizeWindow('frame', 1920, 1080)
    cv2.moveWindow("frame", 0, 0)
    cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
    x1, y1 = 0, 0
    window_width = 190
    window_height = 190

    #Save images after every 4 frames to prevent duplicates
    skip_frames = 4
    frame_gap = 0

    # Store images here
    directory = 'train_images_h'
    box_file = 'boxes_h.txt'

    if cleanup:

        # Delete the images directory if it exists
        if os.path.exists(directory):
            shutil.rmtree(directory)

        open(box_file, 'w').close()

        # Initialize the counter to 0
        counter = 0

    elif os.path.exists(box_file):
        #Append new boxes to the previously stored ones
        with open(box_file, 'r') as text_file:
            box_content = text_file.read()

        # Set the counter to the previous highest checkpoint
        counter = int(box_content.split(':')[-2].split(',')[-1])

    fr = open(box_file, 'a')
    if not os.path.exists(directory):
        os.mkdir(directory)

    initial_wait = 0

    while True:
        ret, frame = cap.read()
        if not ret:
            break

        # Invert the image laterally to get the mirror reflection.
        frame = cv2.flip(frame, 1)

        # Make a copy of the original frame
        orig = frame.copy()

        # Wait for the first 60 frames so that you can place your hand correctly
        if initial_wait > 60:

            frame_gap += 1

            # Move the window to the right
            if x1 + window_width < frame.shape[1]:
                x1 += 4
                time.sleep(0.1)

            elif y1 + window_height + 270 < frame.shape[1]:

                #Move window down by 80px
                y1 += 80
                x1 = 0
                frame_gap = 0
                initial_wait = 0

            else:
                break

        else:
            initial_wait += 1

        # Save the image every nth frame.
        if frame_gap == skip_frames:

            # Save the image in the defined directory, named after the counter value
            img_full_name = directory + '/' + str(counter) + '.png'
            cv2.imwrite(img_full_name, orig)

            # Save bounding box coordinates
            fr.write('{}:({},{},{},{}),'.format(counter, x1, y1,
                                                x1 + window_width,
                                                y1 + window_height))

            counter += 1
            frame_gap = 0

        # Draw the sliding window
        cv2.rectangle(frame, (x1, y1), (x1 + window_width, y1 + window_height),
                      (0, 255, 0), 3)

        # Display the frame
        cv2.imshow('frame', frame)
        if cv2.waitKey(1) == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()
    fr.close()
    print("[INFO] Data collection complete...")

    # In this dictionary our images and annotations will be stored.
    data = {}

    # Get the indexes of all images.
    image_indexes = [
        int(img_name.split('.')[0]) for img_name in os.listdir(directory)
    ]

    # Shuffle the indexes to have random train/test split later on.
    np.random.shuffle(image_indexes)

    # Open and read the content of the boxes.txt file
    f = open(box_file, "r")
    box_content = f.read()

    # Convert the bounding boxes to dictionary in the format `index: (x1,y1,x2,y2)` ...
    box_dict = eval('{' + box_content + '}')

    # Close the file
    f.close()

    # Loop over all indexes
    for index in image_indexes:

        # Read the image into memory
        img = cv2.imread(os.path.join(directory, str(index) + '.png'))

        # Read the associated bounding_box
        bounding_box = box_dict[index]

        # Convert the bounding box to dlib format
        x1, y1, x2, y2 = bounding_box
        dlib_box = [dlib.rectangle(left=x1, top=y1, right=x2, bottom=y2)]

        # Store the image and the box together
        data[index] = (img, dlib_box)

    # This is the percentage of data we will use to train
    # The rest will be used for testing
    percent = 0.8

    # How many examples make 80%.
    split = int(len(data) * percent)

    # Separate the images and bounding boxes in different lists.
    images = [tuple_value[0] for tuple_value in data.values()]
    bounding_boxes = [tuple_value[1] for tuple_value in data.values()]

    # Initialize object detector Options
    options = dlib.simple_object_detector_training_options()

    # I'm disabling the horizontal flipping, because it confuses the detector
    # if you're training on few examples
    options.add_left_right_image_flips = False

    # Set the C parameter of SVM
    options.C = 5

    # Note the start time before training.
    st = time.time()

    # You can start the training now
    print("[INFO]Beginning training of the model...")
    detector = dlib.train_simple_object_detector(images[:split],
                                                 bounding_boxes[:split],
                                                 options)

    # Print the Total time taken to train the detector
    print('Training Completed, Total Time taken: {:.2f} seconds'.format(
        time.time() - st))

    file_name = 'Hand_Detector.svm'
    detector.save(file_name)

    win_det = dlib.image_window()
    win_det.set_image(detector)

    print("\nTraining Metrics: {}".format(
        dlib.test_simple_object_detector(images[:split],
                                         bounding_boxes[:split], detector)))

    print("Testing Metrics: {}".format(
        dlib.test_simple_object_detector(images[split:],
                                         bounding_boxes[split:], detector)))
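
The eval('{' + box_content + '}') call in main() will execute any Python expression found in boxes_h.txt. Since the file only ever contains literals in the index:(x1,y1,x2,y2) format, ast.literal_eval is a safer drop-in; a minimal sketch:

import ast

# Same format as boxes_h.txt; literal_eval only accepts Python literals,
# so stray code in the file raises an error instead of executing.
box_dict = ast.literal_eval('{' + box_content + '}')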
# dataset and also contains the positions of the face boxes.  To create your
# own XML files you can use the imglab tool which can be found in the
# tools/imglab folder.  It is a simple graphical tool for labeling objects in
# images with boxes.  To see how to use it read the tools/imglab/README.txt
# file.  But for this example, we just use the training.xml file included with
# dlib.
dlib.train_simple_object_detector(training_xml_path, "detector.svm", options)



# Now that we have a face detector we can test it.  The first statement tests
# it on the training data.  It will print the precision, recall, and then
# average precision.
print("")  # Print blank line to create gap from previous output
print("Training accuracy: {}".format(
    dlib.test_simple_object_detector(training_xml_path, "detector.svm")))
# However, to get an idea if it really worked without overfitting we need to
# run it on images it wasn't trained on.  The next line does this.  Happily, we
# see that the object detector works perfectly on the testing images.
print("Testing accuracy: {}".format(
    dlib.test_simple_object_detector(testing_xml_path, "detector.svm")))





# Now let's use the detector as you would in a normal application.  First we
# will load it from disk.
detector = dlib.simple_object_detector("detector.svm")

# We can look at the HOG filter we learned.  It should look like a face.  Neat!
Example #37
    plt.imshow(image[:, :, ::-1])
    plt.axis('off')


percent = 0.8
split = int(len(data) * percent)

images = [tuple_value[0] for tuple_value in data.values()]
bounding_boxes = [tuple_value[1] for tuple_value in data.values()]

options = dlib.simple_object_detector_training_options()
options.add_left_right_image_flips = False
options.C = 8
st = time.time()
#detector = dlib.train_simple_object_detector(images[:split], bounding_boxes[:split], options)
detector = dlib.train_simple_object_detector(images, bounding_boxes, options)
print('Training Completed, Total Time taken: {:.2f} seconds'.format(time.time() - st))

#file_name = 'models/Hand_Detector_v8_c8.svm'
#detector.save(file_name)


win_det = dlib.image_window()
win_det.set_image(detector)

print("Training Metrics: {}".format(dlib.test_simple_object_detector(images[:split], bounding_boxes[:split], detector)))
print("Testing Metrics: {}".format(dlib.test_simple_object_detector(images[split:], bounding_boxes[split:], detector)))

porter = Porter(detector, language='C')
output = porter.export(embed_data=True)
print(output)
# Tell the code how many CPU cores your computer has for the fastest training.
options.num_threads = 8
options.be_verbose = True

training_xml_path = "signs.xml"
## testing_xml_path = os.path.join(faces_folder, "testing.xml")
# This function does the actual training.  It will save the final detector to
# detector.svm.  The input is an XML file that lists the images in the training
# dataset and also contains the positions of the face boxes.  To create your
# own XML files you can use the imglab tool which can be found in the
# tools/imglab folder.  It is a simple graphical tool for labeling objects in
# images with boxes.  To see how to use it read the tools/imglab/README.txt
# file.  But for this example, we just use the training.xml file included with
# dlib.
dlib.train_simple_object_detector(training_xml_path, TRAINING, options)

# Now that we have a face detector we can test it.  The first statement tests
# it on the training data.  It will print the precision, recall, and then
# average precision.
print("")  # Print blank line to create gap from previous output
print("Training accuracy: {}".format(dlib.test_simple_object_detector(training_xml_path, TRAINING)))

# Now let's use the detector as you would in a normal application.  First we
# will load it from disk.
detector = dlib.simple_object_detector(TRAINING)

# We can look at the HOG filter we learned.  It should look like a face.  Neat!
win_det = dlib.image_window()
win_det.set_image(detector)