Code example #1
File: eig_model.py Project: gharveymn/celltracking
def model_build(path=os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "res", "train"), feature=PCA(), dist_metric=EuclideanDistance(), k=1, sz=None):
    model_fn = os.path.join(path, "mdl.pkl")
    if not os.path.isfile(model_fn):
        [X,y] = read_images(path, sz=sz)
        classifier = NearestNeighbor(dist_metric=dist_metric, k=k)
        model = PredictableModel(feature=feature, classifier=classifier)
        model.compute(X, y)
        save_model(model_fn, model)
    return load_model(model_fn)
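
A minimal usage sketch for model_build() (hedged: "res/test" is a hypothetical directory, read_images is the helper from the same script, and with a NearestNeighbor classifier predict() returns the predicted label, or the label plus distance details in newer facerec versions):

# Hypothetical usage; trains on the first call, then re-loads "mdl.pkl".
model = model_build(k=3)
[X_test, y_test] = read_images(os.path.join("res", "test"))
print "prediction:", model.predict(X_test[0]), "expected:", y_test[0]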
Code example #2
File: recognition.py Project: leandroloi/facerec
def get_model(numeric_dataset, model_filename=None):
    feature = ChainOperator(Resize((128,128)), Fisherfaces())
    classifier = NearestNeighbor(dist_metric=EuclideanDistance(), k=1)
    inner_model = PredictableModel(feature=feature, classifier=classifier)
    model = PredictableModelWrapper(inner_model)
    model.set_data(numeric_dataset)
    model.compute()
    if model_filename is not None:
        save_model(model_filename, model)
    return model
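
ChainOperator is facerec's feature-pipeline combinator: it applies its first operator and feeds the result to the second, so Resize((128,128)) runs before Fisherfaces() above. A hedged sketch of building a longer chain the same pairwise way (TanTriggsPreprocessing is facerec's illumination-normalization operator; relying on its defaults here is an assumption):

from facerec.operators import ChainOperator
from facerec.preprocessing import Resize, TanTriggsPreprocessing
from facerec.feature import Fisherfaces

# Resize -> illumination normalization -> Fisherfaces, chained pairwise:
feature = ChainOperator(Resize((128, 128)),
                        ChainOperator(TanTriggsPreprocessing(), Fisherfaces()))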
Code example #3
File: model_creator.py Project: oloa/Tinder-ML
def create_model_file(username, image_path, feature, classifier):
    # read images and set labels
    [X, y] = read_images(image_path)
    # Define the model as the combination of feature and classifier
    model = PredictableModel(feature=feature.value,
                             classifier=classifier.value)

    # Compute the chosen features on the given data (in X) and labels (in y):
    model.compute(X, y)

    # We then save the model, which uses Python's pickle module:
    model_name = username + "_model.pkl"
    save_model(model_name, model)
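
A hedged usage sketch for create_model_file(). Because the function reads feature.value and classifier.value, the caller presumably passes enum-like members; Choice below is a hypothetical stand-in for that wrapper, and the username and image path are made up:

# Choice mimics whatever enum the project actually passes (hypothetical).
class Choice:
    def __init__(self, value):
        self.value = value

feature = Choice(Fisherfaces())
classifier = Choice(NearestNeighbor(dist_metric=EuclideanDistance(), k=1))
create_model_file("alice", "data/alice_faces", feature, classifier)  # writes "alice_model.pkl"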
Code example #4
File: facerec_tools.py Project: leoneckert/facerec
def computeAndSaveModel(path_to_database, path_for_model_output, size, model_type="Fisherfaces", num_components=0, classifier_neighbours=1):
    print "\n[+] Saving new model (confirmed below)."    
    [X,y,names] = read_images(path_to_database, sz=size)
    if model_type == "Eigenfaces":
        model = PredictableModel(PCA(num_components=num_components), NearestNeighbor(k=classifier_neighbours), dimensions=size, namesDict=names)
    elif model_type == "Fisherfaces":
        model = PredictableModel(Fisherfaces(num_components=num_components), NearestNeighbor(k=classifier_neighbours), dimensions=size, namesDict=names)
    else:
        print "[-] specify the type of model you want to comput as either 'Fisherface' or 'Eigenface' in the computeAndSaveModel function."
        return False

    model.compute(X,y)   
    save_model(path_for_model_output, model)
    print "\n[+] Saving confirmed. New model saved to:", path_for_model_output
Code example #5
File: LDA.py Project: bazilik/nazi-camera
def train(train_path):
    # Now read in the image data. This must be a valid path!
    [X,y,class_names] = read_images(train_path)
    print X,y,class_names
    # Then set up a handler for logging:
    handler = logging.StreamHandler(sys.stdout)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    handler.setFormatter(formatter)
    # Add handler to facerec modules, so we see what's going on inside:
    logger = logging.getLogger("facerec")
    logger.addHandler(handler)
    logger.setLevel(logging.DEBUG)
    # Define the Fisherfaces as Feature Extraction method:
    feature = Fisherfaces()
    # Define a 1-NN classifier with Euclidean Distance:
    classifier = NearestNeighbor(dist_metric=EuclideanDistance(), k=1)
    # Define the model as the combination of feature and classifier
    model = PredictableModel(feature=feature, classifier=classifier)
    # Compute the Fisherfaces on the given data (in X) and labels (in y):
    model.compute(X, y)
    # Then turn the first (at most) 16 eigenvectors into grayscale
    # images (note: eigenvectors are stored by column!)
    E = []
    for i in xrange(min(model.feature.eigenvectors.shape[1], 16)):
        e = model.feature.eigenvectors[:,i].reshape(X[0].shape)
        E.append(minmax_normalize(e,0,255, dtype=np.uint8))
    # Plot them and store the plot to "fisherfaces.png"
    subplot(title="Fisherfaces", images=E, rows=4, cols=4, sptitle="Fisherface", 
        colormap=cm.jet, filename="fisherfaces.png")
    # Perform a 10-fold cross validation
    cv = KFoldCrossValidation(model, k=10)
    cv.validate(X, y)
    # And print the result:
    cv.print_results()
    save_model('model.pkl', model, class_names)
    return [model,class_names]
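
A hedged sketch of consuming train()'s return value ("data/train" is a hypothetical dataset path; class_names maps the integer labels that read_images assigned back to folder names):

# Hypothetical usage of train(); re-reads the set just to get a test image.
[model, class_names] = train("data/train")
[X, y, _] = read_images("data/train")
prediction = model.predict(X[0])  # the 1-NN label (plus details in newer facerec)
print "predicted:", prediction, "true name:", class_names[y[0]]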
Code example #6
 handler = logging.StreamHandler(sys.stdout)
 formatter = logging.Formatter(
     '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
 handler.setFormatter(formatter)
 # Add handler to facerec modules, so we see what's going on inside:
 logger = logging.getLogger("facerec")
 logger.addHandler(handler)
 logger.setLevel(logging.DEBUG)
 # Define the Fisherfaces as Feature Extraction method:
 feature = Fisherfaces()
 # Define a SVM classifier:
 classifier = SVM()
 # Define the model as the combination
 model = PredictableModel(feature=feature, classifier=classifier)
 # Compute a model:
 model.compute(X, y)
 # Save the Model using joblib:
 save_model('model.pkl', model)
 # Perform a Grid Search for the Set of Parameters:
 tuned_parameters = [{
     'kernel': ['rbf'],
     'gamma': [1e-3, 1e-4],
     'C': [1, 10, 100, 1000]
 }, {
     'kernel': ['linear'],
     'C': [1, 10, 100, 1000]
 }]
 # Find a good set of parameters:
 grid_search(model, X, y, tuned_parameters)
 # Perform a 10-fold cross validation
 cv = KFoldCrossValidation(model, k=10)
 cv.validate(X, y)
 # And print the result:
 cv.print_results()
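
A short note on the grid search above: the tuned_parameters structure follows the scikit-learn parameter-grid convention (a list of dicts mapping parameter names to candidate value lists), which suggests grid_search hands the model's SVM off to an sklearn-style exhaustive search over the rbf and linear kernel settings; the exact signature of grid_search is taken on faith from the fragment itself.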
Code example #7
File: create_model.py Project: UieLinux/uiefaces
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    handler.setFormatter(formatter)
    # Add handler to facerec modules, so we see what's going on inside:
    logger = logging.getLogger("facerec")
    logger.addHandler(handler)
    logger.setLevel(logging.DEBUG)
    # Define the Fisherfaces as Feature Extraction method:
    feature = Fisherfaces()
    # Define a 1-NN classifier with Euclidean Distance:
    classifier = NearestNeighbor(dist_metric=EuclideanDistance(), k=1)
    # Define the model as the combination of feature and classifier
    my_model = PredictableModel(feature=feature, classifier=classifier)
    # Compute the Fisherfaces on the given data (in X) and labels (in y):
    my_model.compute(X, y)
    # We then save the model, which uses Python's pickle module:
    save_model('model.pkl', my_model)
    model = load_model('model.pkl')
    # Then turn the first (at most) 16 eigenvectors into grayscale
    # images (note: eigenvectors are stored by column!)
    E = []
    for i in xrange(min(model.feature.eigenvectors.shape[1], 16)):
        e = model.feature.eigenvectors[:,i].reshape(X[0].shape)
        E.append(minmax_normalize(e,0,255, dtype=np.uint8))
    # Plot them and store the plot to "fisherfaces.png"
    subplot(title="Fisherfaces", images=E, rows=4, cols=4, sptitle="Fisherface", colormap=cm.jet, filename="fisherfaces.png")
    # Perform a 10-fold cross validation
    cv = KFoldCrossValidation(model, k=10)
    cv.validate(X, y)
    # And print the result:
    cv.print_results()
Code example #8
    # Add handler to facerec modules, so we see what's going on inside:
    logger = logging.getLogger("facerec")
    logger.addHandler(handler)
    logger.setLevel(logging.DEBUG)
    # Define the Fisherfaces as Feature Extraction method:
    feature = Fisherfaces()
    # Define a 1-NN classifier with Euclidean Distance:
    classifier = NearestNeighbor(dist_metric=EuclideanDistance(), k=1)
    # Define the model as the combination
    model = PredictableModel(feature=feature, classifier=classifier)
    # Compute the Fisherfaces on the given data (in X) and labels (in y):
#---------------------------------------------
#    print "Generating model"
    if not os.path.exists("./temp/mymodel"):
        model.compute(X, y)
        save_model("./temp/mymodel", model)  # saving model here - CHANGE THIS
        exit()
    
#    print "loading model"
    model = load_model("./temp/mymodel")
#    print "loaded model"
    urlForImage = sys.argv[2]
    tmpfilename = "./temp/"+str(urlForImage.split('/')[-1])  #saving image here - CHANGE THIS
    urllib.urlretrieve(urlForImage, tmpfilename)
    im = Image.open(tmpfilename) #add rotate of 90? Don't think so.
    im = im.resize((648,486), Image.ANTIALIAS)
    im = im.convert("L")
#    print "hello",str(im.size)
    im.show()
    to_predict_x = np.asarray(im, dtype=np.uint8)
    li = model.predict(to_predict_x)
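
A note on the prediction above: to_predict_x must match the geometry of the training images, which is why the downloaded image is resized to 648x486 and converted to grayscale ("L") first. With facerec's NearestNeighbor classifier, li carries the predicted label; newer facerec versions return the label together with a dict of candidate labels and distances, so callers may need li[0].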
Code example #9
            handler = logging.StreamHandler(sys.stdout)
            formatter = logging.Formatter(
                '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
            handler.setFormatter(formatter)

            logger = logging.getLogger("facerec")
            logger.addHandler(handler)
            logger.setLevel(logging.DEBUG)

            crossval = KFoldCrossValidation(model, k=options.numfolds)
            crossval.validate(images, labels)
            crossval.print_results()

        print "Computing the model..."
        model.compute(images, labels)

        print "Saving the model..."
        save_model(model_filename, model)
    else:
        print "Loading the model..."
        model = load_model(model_filename)

    if not isinstance(model, ExtendedPredictableModel):
        print "[Error] The given model is not of type '%s'." % "ExtendedPredictableModel"
        sys.exit()

    print "Starting application..."
    App(model=model,
        camera_id=options.camera_id,
        cascade_filename=options.cascade_filename).run()
Code example #10
File: simple_example.py Project: rheiland/facerec
 formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
 handler.setFormatter(formatter)
 # Add handler to facerec modules, so we see what's going on inside:
 logger = logging.getLogger("facerec")
 logger.addHandler(handler)
 logger.setLevel(logging.DEBUG)
 # Define the Fisherfaces as Feature Extraction method:
 feature = Fisherfaces()
 # Define a 1-NN classifier with Euclidean Distance:
 classifier = NearestNeighbor(dist_metric=EuclideanDistance(), k=1)
 # Define the model as the combination of feature and classifier
 my_model = PredictableModel(feature=feature, classifier=classifier)
 # Compute the Fisherfaces on the given data (in X) and labels (in y):
 my_model.compute(X, y)
 # We then save the model, which uses Python's pickle module:
 save_model("model.pkl", my_model)
 model = load_model("model.pkl")
 # Then turn the first (at most) 16 eigenvectors into grayscale
 # images (note: eigenvectors are stored by column!)
 E = []
 for i in xrange(min(model.feature.eigenvectors.shape[1], 16)):
     e = model.feature.eigenvectors[:, i].reshape(X[0].shape)
     E.append(minmax_normalize(e, 0, 255, dtype=np.uint8))
 # Plot them and store the plot to "fisherfaces.png"
 subplot(
     title="Fisherfaces", images=E, rows=4, cols=4, sptitle="Fisherface", colormap=cm.jet, filename="fisherfaces.png"
 )
 # Perform a 10-fold cross validation
 cv = KFoldCrossValidation(model, k=10)
 cv.validate(X, y)
 # And print the result:
 cv.print_results()
Code example #11
File: facedetecti.py Project: rahulroxx/duinoBot
def start():
    from optparse import OptionParser
    # model.pkl is a pickled (hopefully trained) PredictableModel, which is
    # used to make predictions. You can learn a model yourself by passing the
    # parameter -d (or --dataset) to learn the model from a given dataset.
    usage = "usage: %prog [options] model_filename"
    # Add options for training, resizing, validation and setting the camera id:
    parser = OptionParser(usage=usage)
    parser.add_option("-r", "--resize", action="store", type="string", dest="size", default="100x100",
                      help="Resizes the given dataset to a given size in format [width]x[height] (default: 100x100).")
    parser.add_option("-v", "--validate", action="store", dest="numfolds", type="int", default=None,
                      help="Performs a k-fold cross validation on the dataset, if given (default: None).")
    parser.add_option("-t", "--train", action="store", dest="dataset", type="string", default=None,
                      help="Trains the model on the given dataset.")
    parser.add_option("-i", "--id", action="store", dest="camera_id", type="int", default=0,
                      help="Sets the Camera Id to be used (default: 0).")
    parser.add_option("-c", "--cascade", action="store", dest="cascade_filename",
                      default="haarcascade_frontalface_alt2.xml",
                      help="Sets the path to the Haar Cascade used for the face detection part (default: haarcascade_frontalface_alt2.xml).")
    # Show the options to the user:
    parser.print_help()
    print "Press [ESC] to exit the program!"
    print "Script output:"
    # Parse arguments:
    (options, args) = parser.parse_args()
    # Check if a model name was passed:
    dataset = "C:\\Users\\newbie\\PycharmProjects\\duinobot\\scripts\\test"
    cascade_filename = "C:\\Users\\newbie\\PycharmProjects\\duinobot\\scripts\\haarcascade_frontalface_alt2.xml"
    if len(args) == 0:
        print "[Error] No prediction model was given."
        sys.exit()
    # This model will be used (or created if the training parameter (-t, --train) exists:
    model_filename = args[0]
    # Check if the given model exists, if no dataset was passed:
    if (dataset is None) and (not os.path.exists(model_filename)):
        print "[Error] No prediction model found at '%s'." % model_filename
        sys.exit()
    # Check if the given (or default) cascade file exists:

    if not os.path.exists(cascade_filename):
        print "[Error] No Cascade File found at '%s'." % cascade_filename
        sys.exit()
    # We are resizing the images to a fixed size, as this is necessary for some
    # of the algorithms; others, like LBPH, don't have this requirement. To
    # prevent problems from popping up, we resize them with a default value if
    # none was given:
    try:
        image_size = (int(options.size.split("x")[0]), int(options.size.split("x")[1]))
    except:
        print "[Error] Unable to parse the given image size '%s'. Please pass it in the format [width]x[height]!" % options.size
        sys.exit()
    # We have got a dataset to learn a new model from:
    if dataset:
        # Check if the given dataset exists:
        if not os.path.exists(dataset):
            print "[Error] No dataset found at '%s'." % dataset_path
            sys.exit()
        # Reads the images, labels and folder_names from a given dataset. Images
        # are resized to given size on the fly:
        print "Loading dataset..."
        [images, labels, subject_names] = read_images(dataset, image_size)
        # Zip us a {label, name} dict from the given data:
        list_of_labels = list(xrange(max(labels) + 1))
        subject_dictionary = dict(zip(list_of_labels, subject_names))
        # Get the model we want to compute:
        model = get_model(image_size=image_size, subject_names=subject_dictionary)
        # Sometimes you want to know how good the model may perform on the data
        # given, the script allows you to perform a k-fold Cross Validation before
        # the Detection & Recognition part starts:
        if options.numfolds:
            print "Validating model with %s folds..." % options.numfolds
            # We want to have some log output, so set up a new logging handler
            # and point it to stdout:
            handler = logging.StreamHandler(sys.stdout)
            formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
            handler.setFormatter(formatter)
            # Add a handler to facerec modules, so we see what's going on inside:
            logger = logging.getLogger("facerec")
            logger.addHandler(handler)
            logger.setLevel(logging.DEBUG)
            # Perform the validation & print results:
            crossval = KFoldCrossValidation(model, k=options.numfolds)
            crossval.validate(images, labels)
            crossval.print_results()
        # Compute the model:
        print "Computing the model..."
        model.compute(images, labels)
        # And save the model, which uses Pythons pickle module:
        print "Saving the model..."
        save_model(model_filename, model)
    else:
        print "Loading the model..."
        model = load_model(model_filename)
    # We operate on an ExtendedPredictableModel. Quit the application if this
    # isn't what we expect it to be:
    if not isinstance(model, ExtendedPredictableModel):
        print "[Error] The given model is not of type '%s'." % "ExtendedPredictableModel"
        sys.exit()
    # Now it's time to finally start the Application! It simply gets the model
    # and the image size the incoming webcam or video images are resized to:
    print "Starting application..."
    App(model=model,
        camera_id=options.camera_id,
        cascade_filename=cascade_filename).run()
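
A hedged example invocation of this script (the dataset and cascade paths are hard-coded in this variant, so only the positional model filename and the optional flags matter; the filename is made up):

python facedetecti.py -r 100x100 -v 10 my_model.pkl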
Code example #12
File: recogniseD.py Project: ianjuma/recogniseD
            handler = logging.StreamHandler(sys.stdout)
            formatter = logging.Formatter(
                '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
            handler.setFormatter(formatter)
            # Add a handler to facerec modules, so we see what's going on inside:
            logger = logging.getLogger("facerec")
            logger.addHandler(handler)
            logger.setLevel(logging.DEBUG)
            # Perform the validation & print results:
            crossval = KFoldCrossValidation(model, k=options.numfolds)
            crossval.validate(images, labels)
            crossval.print_results()
        # Compute the model:
        print "Computing the model..."
        model.compute(images, labels)
        # And save the model, which uses Pythons pickle module:
        print "Saving the model..."
        save_model(model_filename, model)
    else:
        print "Loading the model..."
        model = load_model(model_filename)
    # We operate on an ExtendedPredictableModel. Quit the application if this
    # isn't what we expect it to be:
    if not isinstance(model, ExtendedPredictableModel):
        print "[Error] The given model is not of type '%s'." % "ExtendedPredictableModel"
        sys.exit()
    # Now it's time to finally start the Application! It simply gets the model
    # and the image size the incoming webcam or video images are resized to:
    print "Starting application..."
    App(_model=model,
        camera_id=options.camera_id,
        cascade_filename=options.cascade_filename).run()
Code example #13
    def Init(self):
        from optparse import OptionParser
        # model.pkl is a pickled (hopefully trained) PredictableModel, which is
        # used to make predictions. You can learn a model yourself by passing the
        # parameter -d (or --dataset) to learn the model from a given dataset.
        usage = "usage: %prog [options] model_filename"
        # Add options for training, resizing, validation and setting the camera id:
        parser = OptionParser(usage=usage)
        parser.add_option(
            "-r",
            "--resize",
            action="store",
            type="string",
            dest="size",
            default="100x100",
            help=
            "Resizes the given dataset to a given size in format [width]x[height] (default: 100x100)."
        )
        parser.add_option(
            "-v",
            "--validate",
            action="store",
            dest="numfolds",
            type="int",
            default=None,
            help=
            "Performs a k-fold cross validation on the dataset, if given (default: None)."
        )
        parser.add_option("-t",
                          "--train",
                          action="store",
                          dest="dataset",
                          type="string",
                          default=None,
                          help="Trains the model on the given dataset.")
        parser.add_option("-i",
                          "--id",
                          action="store",
                          dest="camera_id",
                          type="int",
                          default=0,
                          help="Sets the Camera Id to be used (default: 0).")
        parser.add_option(
            "-c",
            "--cascade",
            action="store",
            dest="cascade_filename",
            default="haarcascade_frontalface_default.xml",
            help=
            "Sets the path to the Haar Cascade used for the face detection part (default: haarcascade_frontalface_alt2.xml)."
        )
        # Show the options to the user:
        parser.print_help()
        print "Press [ESC] to exit the program!"
        print "Script output:"
        # Parse arguments:
        (options, args) = parser.parse_args()
        print(options, args)
        # Check if a model name was passed:
        my_model = 'my_model.pk'
        '''
        if len(args) == 0:
            print "[Error] No prediction model was given."
            sys.exit()
        '''
        # This model will be used (or created if the training parameter (-t, --train) exists:
        #model_filename = args[0]
        model_filename = my_model

        options.dataset = 'faces'
        # Check if the given model exists, if no dataset was passed:
        if (options.dataset is None) and (not os.path.exists(model_filename)):
            print "[Error] No prediction model found at '%s'." % model_filename
            sys.exit()
        # Check if the given (or default) cascade file exists:
        if not os.path.exists(options.cascade_filename):
            print "[Error] No Cascade File found at '%s'." % options.cascade_filename
            sys.exit()
        # We are resizing the images to a fixed size, as this is necessary for some
        # of the algorithms; others, like LBPH, don't have this requirement. To
        # prevent problems from popping up, we resize them with a default value if
        # none was given:
        try:
            image_size = (int(options.size.split("x")[0]),
                          int(options.size.split("x")[1]))
        except:
            print "[Error] Unable to parse the given image size '%s'. Please pass it in the format [width]x[height]!" % options.size
            sys.exit()
        # We have got a dataset to learn a new model from:
        if options.dataset:
            print('data set')
            print(options.dataset)
            # Check if the given dataset exists:
            if not os.path.exists(options.dataset):
                print "[Error] No dataset found at '%s'." % dataset_path
                sys.exit()
            # Reads the images, labels and folder_names from a given dataset. Images
            # are resized to given size on the fly:
            print "Loading dataset..."
            [images, labels,
             subject_names] = read_images(options.dataset, image_size)
            # Zip us a {label, name} dict from the given data:
            list_of_labels = list(xrange(max(labels) + 1))
            subject_dictionary = dict(zip(list_of_labels, subject_names))
            # Get the model we want to compute:
            model = get_model(image_size=image_size,
                              subject_names=subject_dictionary)
            # Sometimes you want to know how good the model may perform on the data
            # given, the script allows you to perform a k-fold Cross Validation before
            # the Detection & Recognition part starts:
            if options.numfolds:
                print "Validating model with %s folds..." % options.numfolds
                # We want to have some log output, so set up a new logging handler
                # and point it to stdout:
                handler = logging.StreamHandler(sys.stdout)
                formatter = logging.Formatter(
                    '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
                handler.setFormatter(formatter)
                # Add a handler to facerec modules, so we see what's going on inside:
                logger = logging.getLogger("facerec")
                logger.addHandler(handler)
                logger.setLevel(logging.DEBUG)
                # Perform the validation & print results:
                crossval = KFoldCrossValidation(model, k=options.numfolds)
                crossval.validate(images, labels)
                crossval.print_results()
            # Compute the model:
            print "Computing the model..."
            model.compute(images, labels)
            # And save the model, which uses Pythons pickle module:
            print "Saving the model..."
            save_model(model_filename, model)
        else:
            print "Loading the model..."
            model = load_model(model_filename)

        # We operate on an ExtendedPredictableModel. Quit the application if this
        # isn't what we expect it to be:
        if not isinstance(model, ExtendedPredictableModel):
            print "[Error] The given model is not of type '%s'." % "ExtendedPredictableModel"
            sys.exit()
        # Now it's time to finally start the Application! It simply gets the model
        # and the image size the incoming webcam or video images are resized to:
        print "Starting application..."
        self.__faceRecognizer = recognizer(
            model=model,
            camera_id=options.camera_id,
            cascade_filename=options.cascade_filename)
Code example #14
File: modeling.py Project: sangmoon/facerec_cv
                            print "I/O error({0}): {1}".format(errno, strerror)
                        except:
                            print "Unexpected error:", sys.exc_info()[0]
                            raise
            c = c + 1
    return [X, y]


if __name__ == "__main__":
    # This is where we write the images, if an output_dir is given
    # in command line:
    out_dir = None
    # You'll need at least a path to your image data, please see
    # the tutorial coming with this source code on how to prepare
    # your image data:
    if len(sys.argv) < 2:
        print "USAGE: facerec_demo.py </path/to/images>"
        sys.exit()
    # Now read in the image data. This must be a valid path!
    [X, y] = read_images(sys.argv[1])
    # Define the Fisherfaces as Feature Extraction method:
    feature = Fisherfaces()
    # Define a 3-NN classifier with Euclidean Distance:
    classifier = NearestNeighbor(dist_metric=EuclideanDistance(), k=3)
    # Define the model as the combination of feature and classifier
    my_model = PredictableModel(feature=feature, classifier=classifier)
    # Compute the Fisherfaces on the given data (in X) and labels (in y):
    my_model.compute(X, y)
    # We then save the model, which uses Python's pickle module:
    save_model('myModel.pkl', my_model)
Code example #15
    #feature = PCA()

    # Define the Fisherfaces as Feature Extraction method:
    feature = Fisherfaces()

    # Define a 1-NN classifier with Euclidean Distance:
    classifier = NearestNeighbor(dist_metric=EuclideanDistance(), k=1)

    # Define the model as the combination of feature and classifier
    my_model = PredictableModel(feature=feature, classifier=classifier)

    # Compute the Fisherfaces on the given data (in X) and labels (in y):
    my_model.compute(X, y)

    # We then save the model, which uses Python's pickle module:
    save_model('model_like.pkl', my_model)
    #model = load_model('model.pkl')

    # Then turn the first (at most) 16 eigenvectors into grayscale
    # images (note: eigenvectors are stored by column!)
    '''
    E = []
    for i in xrange(min(model.feature.eigenvectors.shape[1], 122)):
        e = model.feature.eigenvectors[:, i].reshape(X[0].shape)
        E.append(minmax_normalize(e, 0, 255, dtype=np.uint8))

    # Plot them and store the plot to "fisherfaces.png"
    subplot(title="Fisherfaces", images=E, rows=4, cols=4, sptitle="Fisherface", colormap=cm.jet, filename="fisherfaces.png")

    # Perform a 10-fold cross validation
    cv = KFoldCrossValidation(model, k=10)
Code example #16
File: main.py Project: triptolemusew/adaptiveui
    def cameraStack(self):
        model_filename = "model_gender_working.pkl"
        image_size = (200,200)
        [images, labels, subject_names] = read_images("gender/", image_size)
        list_of_labels = list(xrange(max(labels)+1))
        subject_dictionary = dict(zip(list_of_labels, subject_names))
        model = get_model(image_size=image_size, subject_names=subject_dictionary)
        model.compute(images, labels)
        print "save model"
        save_model(model_filename, model)

        self.model_gender = load_model(model_filename)

        model_filename = "model_emotion.pkl"
        image_size = (200, 200)
        [images, labels, subject_names] = read_images("emotion/", image_size)
        list_of_labels = list(xrange(max(labels) + 1))
        subject_dictionary = dict(zip(list_of_labels, subject_names))
        model = get_model(image_size=image_size, subject_names=subject_dictionary)
        model.compute(images, labels)
        print "save model"
        save_model(model_filename, model)

        self.model_emotion = load_model(model_filename)

        faceCascade = 'haarcascade_frontalface_alt2.xml'
        print self.model_gender.image_size
        print "Starting the face detection"

        self.detector = CascadedDetector(cascade_fn=faceCascade, minNeighbors=5, scaleFactor=1.1)
        self.video_capture = cv2.VideoCapture(0)

        while True:
            ret, frame = self.video_capture.read()
            img = cv2.resize(frame, (frame.shape[1] / 2, frame.shape[0] / 2), interpolation=cv2.INTER_CUBIC)
            imgout = img.copy()
            for i, r in enumerate(self.detector.detect(img)):
                x0, y0, x1, y1 = r
                self.x0 = x0
                self.y0 = y0
                self.x1 = x1
                self.y1 = y1
                # (1) Get face, (2) Convert to grayscale & (3) resize to image_size:
                face = img[y0:y1, x0:x1]
                face = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)
                face = cv2.resize(face, self.model_gender.image_size, interpolation=cv2.INTER_CUBIC)
                # Get a prediction from the model:
                prediction = self.model_gender.predict(face)[0]
                emotion = self.model_emotion.predict(face)[0]
                # Draw the face area in image:
                cv2.rectangle(imgout, (x0, y0), (x1, y1), (0, 255, 0), 2)
                # Draw the predicted name (folder name...):
                self.distance = str(np.asscalar(np.int16(self.y0)))
                draw_str(imgout, (x0 - 20, y0 - 5), self.model_emotion.subject_names[emotion])
                draw_str(imgout, (x0 - 20, y0 - 20), self.model_gender.subject_names[prediction])
                draw_str(imgout, (x0 - 20, y0 - 35), "distance: " + self.distance + "cm")
                self.gender = self.model_gender.subject_names[prediction]
                self.changeSetting(self.currently_playing_button)
                self.changeSetting(self.notifications_button)
                self.changeSetting(self.likes_button)
                self.changeSetting(self.collections_button)
            cv2.imshow('video', imgout)
            ch = cv2.waitKey(10)
            if ch == 27:
                break
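
One design note on this example: both the gender and the emotion model are recomputed and re-saved with save_model() on every call to cameraStack(), even if the .pkl files already exist; the guard used in Code example #1, which computes only when the pickle is missing and otherwise just calls load_model(), would avoid retraining on every start.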