Example #1
class App(object):
    def __init__(
        self,
        video_src,
        dataset_fn,
        face_sz=(130, 130),
        cascade_fn="/home/philipp/projects/opencv2/OpenCV-2.3.1/data/haarcascades/haarcascade_frontalface_alt2.xml"
    ):
        self.face_sz = face_sz
        self.cam = create_capture(video_src)
        ret, self.frame = self.cam.read()
        self.detector = CascadedDetector(cascade_fn=cascade_fn,
                                         minNeighbors=5,
                                         scaleFactor=1.1)
        # define the feature extraction chain & the classifier
        feature = ChainOperator(TanTriggsPreprocessing(), LBP())
        classifier = NearestNeighbor(dist_metric=ChiSquareDistance())
        # build the predictable model
        self.predictor = PredictableModel(feature, classifier)
        # read the data & compute the predictor
        self.dataSet = DataSet(filename=dataset_fn, sz=self.face_sz)
        self.predictor.compute(self.dataSet.data, self.dataSet.labels)

    def run(self):
        while True:
            ret, frame = self.cam.read()
            # resize the frame to half the original size
            img = cv2.resize(frame, (frame.shape[1] // 2, frame.shape[0] // 2),
                             interpolation=cv2.INTER_CUBIC)
            imgout = img.copy()
            for i, r in enumerate(self.detector.detect(img)):
                x0, y0, x1, y1 = r
                # get face, convert to grayscale & resize to face_sz
                face = img[y0:y1, x0:x1]
                face = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)
                face = cv2.resize(face,
                                  self.face_sz,
                                  interpolation=cv2.INTER_CUBIC)
                # get a prediction
                prediction = self.predictor.predict(face)[0]
                # draw the face area
                cv2.rectangle(imgout, (x0, y0), (x1, y1), (0, 255, 0), 2)
                # draw the predicted name (folder name...)
                draw_str(imgout, (x0 - 20, y0 - 20),
                         self.dataSet.names[prediction])
            cv2.imshow('videofacerec', imgout)
            # get pressed key
            ch = cv2.waitKey(10)
            if ch == 27:
                break
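A minimal launch sketch for the App above (hypothetical; it assumes the facerec demo helpers such as create_capture are importable and that the dataset folder has one subfolder per person, since the predicted name is the folder name):

if __name__ == '__main__':
    import sys
    # sys.argv[1] is assumed to name the dataset folder; video source 0 is the default webcam
    App(video_src=0, dataset_fn=sys.argv[1]).run()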
Example #2
class App(object):
    def __init__(self, video_src, dataset_fn, face_sz=(130,130), cascade_fn=join(curpath, 'haarcascade_frontalface_alt2.xml')):
        self.face_sz = face_sz
        self.cam = create_capture(video_src)
        ret, self.frame = self.cam.read()
        self.detector = CascadedDetector(cascade_fn=cascade_fn, minNeighbors=5, scaleFactor=1.1)
        # define the feature extraction chain & the classifier
        feature = ChainOperator(TanTriggsPreprocessing(), LBP())
        classifier = NearestNeighbor(dist_metric=ChiSquareDistance())
        # build the predictable model
        self.predictor = PredictableModel(feature, classifier)
        # read the data & compute the predictor
        self.dataSet = DataSet(filename=dataset_fn, sz=self.face_sz)
        self.predictor.compute(self.dataSet.data, self.dataSet.labels)

    def run(self):
        while True:
            ret, frame = self.cam.read()

            # resize the frame to half the original size
            img = cv2.resize(frame, (frame.shape[1] // 2, frame.shape[0] // 2), interpolation=cv2.INTER_CUBIC)
            imgout = img.copy()
            for i, r in enumerate(self.detector.detect(img)):
                x0, y0, x1, y1 = r

                # get face, convert to grayscale & resize to face_sz
                face = img[y0:y1, x0:x1]
                face = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)
                face = cv2.resize(face, self.face_sz, interpolation=cv2.INTER_CUBIC)

                # get a prediction
                prediction = self.predictor.predict(face)[0]  # predict returns [label, info]; keep the label

                # draw the face area
                cv2.rectangle(imgout, (x0, y0), (x1, y1), (0, 255, 0), 2)

                # draw the predicted name (folder name...)
                draw_str(imgout, (x0 - 20, y0 - 20), self.dataSet.names[prediction])

            cv2.imshow('videofacerec', imgout)

            # get pressed key
            ch = cv2.waitKey(10)
            if ch == 27:
                break
Example #3
def test_one_method(input_faces, test_faces, feature, classifier, chain=True):
    if chain:
        feature = ChainOperator(TanTriggsPreprocessing(), feature)

    model = PredictableModel(feature, classifier)
    id_list, face_list = zip(*input_faces)

    start = time.clock()
    model.compute(face_list, id_list)
    stop = time.clock()
    training_time = stop-start

    res_list = []
    start = time.clock()
    for id, image in test_faces:
        res = model.predict(image)
        res_list.append([id]+res)
    stop = time.clock()
    predict_time = stop-start

    return (training_time, predict_time, res_list)
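A hypothetical invocation of test_one_method, assuming input_faces and test_faces are lists of (id, image) pairs prepared elsewhere; the feature/classifier choice here is illustrative, borrowed from the combinations used in the later examples:

train_time, predict_time, results = test_one_method(
    input_faces, test_faces,
    feature=SpatialHistogram(LPQ()),
    classifier=NearestNeighbor(dist_metric=ChiSquareDistance(), k=3))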
def checkFace(origin_img):
    # TODO
    model = PredictableModel(Fisherfaces(), NearestNeighbor())
    
    result_name = 'unknown'
    
    [X, y, subject_names] = read_images(path)
    list_of_labels = list(xrange(max(y) + 1))
    subject_dictionary = dict(zip(list_of_labels, subject_names))
    model.compute(X, y)

    gray = cv2.cvtColor(origin_img, cv2.COLOR_BGR2GRAY)
    sampleImage = cv2.resize(gray, (256, 256))

    [predicted_label, generic_classifier_output] = model.predict(sampleImage)
    print [predicted_label, generic_classifier_output]
        
    if int(generic_classifier_output['distances'][0]) <= 700:  # distances is an array; take the nearest match
        result_name = str(subject_dictionary[predicted_label])

    return result_name
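A hedged usage sketch for checkFace; the probe filename is illustrative, and path (the folder of training images) is assumed to be defined by the surrounding project:

result = checkFace(cv2.imread('probe.jpg'))
print result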
Example #5
    model = PredictableModel(feature=feature, classifier=classifier)
    # Compute the Fisherfaces on the given data (in X) and labels (in y):
#---------------------------------------------
#    print "Generating model"
    if not os.path.exists("./temp/mymodel"):
        model.compute(X, y)
        save_model("./temp/mymodel", model)  # saving model here - CHANGE THIS
        exit()
    
#    print "loading model"
    model = load_model("./temp/mymodel")
#    print "loaded model"
    urlForImage = sys.argv[2]
    tmpfilename = "./temp/"+str(urlForImage.split('/')[-1])  #saving image here - CHANGE THIS
    urllib.urlretrieve(urlForImage, tmpfilename)
    im = Image.open(tmpfilename) #add rotate of 90? Don't think so.
    im = im.resize((648,486), Image.ANTIALIAS)
    im = im.convert("L")
#    print "hello",str(im.size)
    im.show()
    to_predict_x = np.asarray(im, dtype=np.uint8)
    li = model.predict(to_predict_x)
    if int(li[1]['distances'][0]) < 10000:
#      print str(li)
#      print str(d)
      print str(d[li[0]])  # d: label -> name mapping assumed to be defined earlier
#      print "Authenticated as ",str(li[0]),":",str(d[li[0]])," with distance : ",str(li[1]['distances']) #set threshold as 10000
    else:
      print '-1'
#      print "Could not Authenticate with distance : ",str(li[1]['distances'][0])," for ",str(li[0]),":",str(d[li[0]])
Example #6
    model.compute(X, y)

    logger.debug(model)

    # Then turn the first (at most) 16 eigenvectors into grayscale
    # images (note: eigenvectors are stored by column!)
    E = []
    for i in xrange(min(model.feature.eigenvectors.shape[1], 16)):
        e = model.feature.eigenvectors[:, i].reshape(X[0].shape)
        E.append(minmax_normalize(e, 0, 255, dtype=np.uint8))
    # Plot them and store the plot to "fisherfaces.png"
    subplot(title="Fisherfaces",
            images=E,
            rows=4,
            cols=4,
            sptitle="Fisherface",
            colormap=cm.jet,
            filename="fisherfaces.png")

    logger.debug("Iniciando teste.")
    [images_test, labels_test] = read_images(database_path, None, None, False)
    i = 0
    rate = 0
    for im_test in images_test:
        prediction = model.predict(im_test)
        if prediction[0] == labels_test[i]:
            rate += 1
        i += 1
    classification_rate = rate * 100.0 / i
    error = 100 - classification_rate
    logger.debug("Classification rate: %f%%", classification_rate)
    logger.debug("Error rate: %f%%", error)
Example #7
    handler = logging.StreamHandler(sys.stdout)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    handler.setFormatter(formatter)
    # Add handler to facerec modules, so we see what's going on inside:
    logger = logging.getLogger("facerec")
    logger.addHandler(handler)
    logger.setLevel(logging.DEBUG)
    # Define the Fisherfaces as Feature Extraction method:
    feature = Fisherfaces()
    # Define a 1-NN classifier with Euclidean Distance:
    classifier = NearestNeighbor(dist_metric=EuclideanDistance(), k=1)
    # Define the model as the combination
    model = PredictableModel(feature=feature, classifier=classifier)
    # Compute the Fisherfaces on the given data (in X) and labels (in y):
    model.compute(X, y)
    prediction = model.predict(X[0])  # predict takes a single image, not the whole list
    predicted_label = prediction[0]
    classifier_output = prediction[1]

    distance = classifier_output['distances'][0]
    print distance

    E = []
    for i in xrange(min(model.feature.eigenvectors.shape[1], 16)):
        e = model.feature.eigenvectors[:, i].reshape(X[0].shape)
        E.append(minmax_normalize(e, 0, 255, dtype=np.uint8))
    # Plot them and store the plot to "python_fisherfaces_fisherfaces.pdf"
    subplot(title="Fisherfaces", images=E, rows=4, cols=4, sptitle="Fisherface", colormap=cm.jet, filename="fisherfaces.png")
    # Perform a 10-fold cross validation
    cv = KFoldCrossValidation(model, k=10)
    cv.validate(X, y)
Example #9
File: face_rec.py  Project: orvitinn/msc
    print "Train the model"
    start = time.clock()
    # model.compute(X, y)
    model.compute(face_list, id_list)
    stop = time.clock()
    print "Training done in", stop - start, " next...find a face"

    target = "10.bmp"
    if len(sys.argv) > 3:
        target = sys.argv[3]

    fp = utils.FaceProcessor()

    while target != "quit":
        # prufu_mynd = Image.open(os.path.join(path, target))
        prufu_mynd = cv2.imread(os.path.join(path, target))
        print "Nota mynd: ", os.path.join(path, target)
        if prufu_mynd is not None:
            prufu_mynd = fp.process_image(prufu_mynd)
            if prufu_mynd is None:
                print "fann ekkert andlit!"
            else:
                start = time.clock()
                # res = model.predict(td)
                res = model.predict(prufu_mynd)
                stop = time.clock()
                print res
                print "time: ", stop - start
        target = raw_input("Next image or quit: ")
Example #10
# load a dataset (e.g. AT&T Facedatabase)
dataSet = DataSet("/root/libface/img/yalefaces")
# define Fisherfaces as feature extraction method
feature = Fisherfaces()
# define a 1-NN classifier with Euclidean Distance
classifier = NearestNeighbor(dist_metric=EuclideanDistance(), k=1)
# define the model as the combination
model = PredictableModel(feature=feature, classifier=classifier)
# compute the model on the data and labels
model.compute(dataSet.data, dataSet.labels)
# try to recognize a new image
im = Image.open("/root/libface/img/reg.jpg")
im = im.convert("L")
ar = np.asarray(im, dtype=np.uint8)
# predict returns [predicted_label, classifier_output]; use the label to look up the name
prediction = model.predict(ar)
print(dataSet.names[prediction[0]])
# turn the first (at most) 16 eigenvectors into grayscale
# images (note: eigenvectors are stored by column!)

"""
E = []
for i in xrange(min(model.feature.eigenvectors.shape[1], 16)):
	e = model.feature.eigenvectors[:,i].reshape(dataSet.data[0].shape)
	E.append(minmax_normalize(e,0,255, dtype=np.uint8))
# plot them and store the plot to "python_fisherfaces_fisherfaces.pdf"
subplot(title="Fisherfaces", images=E, rows=4, cols=4, sptitle="Fisherface", colormap=cm.jet, filename="fisherfaces.pdf")
# perform a 10-fold cross validation
cv = KFoldCrossValidation(model, k=10)
cv.validate(dataSet.data, dataSet.labels)
print cv
"""
Example #11
            # If we're learning, skip back to the top of the loop
            #
            continue

        #
        # If we don't have anything in the database, skip the recognition part
        #
        if X == []:
            break

        #
        # Do we recognize the current face?
        # The "predict" method will return the closest match of the current image to the database
        #
        finalimage = sampleImage & facefilter
        [predicted_label, generic_classifier_output] = model.predict(finalimage)

        #
        # Determine if the prediction is within a certain "threshold".  This is actually the 
        # "distance" between the image and the database.   The closer the distance is to "0", the 
        # closer a match it really is.
        #
        # Higher thresholds result in less accuracy or more mis-identified pictures.
        #
        if int(generic_classifier_output['distances'][0]) > current_threshold * 4:
            high=current_threshold * 4
        else:
            high=int(generic_classifier_output['distances'][0])

        #
        # The percentage tells us how close the current image is to a perfect match
Example #12
def run():
    # This is where we write the images, if an output_dir is given
    # in command line:

    # out_dir = None

    # You'll need at least a path to your image data, please see
    # the tutorial coming with this source code on how to prepare
    # your image data:

    # if len(sys.argv) < 2:
    #     print ("USAGE: facerec_demo.py </path/to/images>")
    #     sys.exit()

    # Now read in the image data. This must be a valid path!

    # [X,y] = read_images(sys.argv[1])
    [X, y] = read_images('../data/trainset/')

    # dataset = FilesystemReader(sys.argv[1])
    # Then set up a handler for logging:
    handler = logging.StreamHandler(sys.stdout)
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    handler.setFormatter(formatter)
    # Add handler to facerec modules, so we see what's going on inside:
    logger = logging.getLogger("facerec")
    logger.addHandler(handler)
    logger.setLevel(logging.DEBUG)
    # Define the Fisherfaces as Feature Extraction method:
    feature = Fisherfaces()
    # Define an SVM classifier and a 1-NN classifier with Euclidean Distance:
    svm = SVM(C=0.1, kernel='rbf', degree=4, gamma='auto', coef0=0.0)
    knn = NearestNeighbor(dist_metric=EuclideanDistance(), k=1)
    # Define the models as feature/classifier combinations:
    model_svm = PredictableModel(feature=feature, classifier=svm)

    model_knn = PredictableModel(feature=feature, classifier=knn)

    # Compute the Fisherfaces on the given data (in X) and labels (in y):
    model_svm.compute(X, y)

    model_knn.compute(X, y)
    # E = []
    # for i in range(min(model.feature.eigenvectors.shape[1], 16)):
    #  e = model.feature.eigenvectors[:,i].reshape(X[0].shape)
    #  E.append(minmax_normalize(e,0,255, dtype=np.uint8))
    # subplot(title="Fisherfaces", images=E, rows=4, cols=4, sptitle="Fisherface", colormap=cm.jet, filename="fisherfaces.png")

    # cv = LeaveOneOutCrossValidation(model)
    # print(cv0)
    # cv0.validate(dataset.data,dataset.classes,print_debug=True)
    cv_svm = KFoldCrossValidation(model_svm, k=10)
    cv_knn = KFoldCrossValidation(model_knn, k=10)

    param_grid = [
        {
            'C': [0.05, 0.1, 0.3, 0.5, 1, 2, 5],
            'gamma': [0.001, 0.0001],
            'kernel': ['rbf']
        },
    ]
    [tX, tY] = read_images('../data/testset/')

    # cv_svm.validate(X, y)
    # cv_knn.validate(X, y)

    gs(model_svm, X, y, param_grid)

    count1 = 0
    count2 = 0
    for i in range(len(tY)):
        r1 = model_svm.predict(tX[i])
        r2 = model_knn.predict(tX[i])
        if r1[0] == tY[i]:
            count1 += 1
        if r2[0] == tY[i]:
            count2 += 1

    print('SVM ACC:{0}'.format(count1 / float(len(tY))))
    print('KNN ACC:{0}'.format(count2 / float(len(tY))))
    print(cv_knn.print_results())
    print(cv_svm.print_results())
Example #13
File: facedb.py  Project: orvitinn/msc
class FaceDatabase(object):
    def __init__(self,
                 database_folder,
                 feature_parameter="LPQ",
                 metric="chi",
                 k=3):
        self.model = None

        handler = logging.StreamHandler(sys.stdout)
        formatter = logging.Formatter(
            '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        handler.setFormatter(formatter)
        logger = logging.getLogger("facerec")
        logger.addHandler(handler)
        logger.setLevel(logging.DEBUG)

        path = database_folder

        start = time.clock()
        input_faces = utils.read_images_from_single_folder(path)
        stop = time.clock()

        print("read {}, images from {} in {} seconds.".format(
            len(input_faces), path, stop - start))

        feature = None
        m = {
            "fisher": Fisherfaces,
            "fisher80": Fisherfaces,
            "pca": PCA,
            "pca10": PCA,
            "lda": LDA,
            "spatial": SpatialHistogram,
            "LPQ": SpatialHistogram
        }

        if feature_parameter in m:
            if feature_parameter == 'LPQ':
                feature = SpatialHistogram(LPQ())
                self.threshold = threshold_function(71.4, 70)
            elif feature_parameter == 'fisher80':
                feature = Fisherfaces(80)
                self.threshold = threshold_function(0.61, 0.5)
            elif feature_parameter == 'fisher':
                feature = Fisherfaces()
                self.threshold = threshold_function(0.61, 0.5)
            elif feature_parameter == 'pca80':
                feature = PCA(80)
            else:
                feature = m[feature_parameter]()
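        # note: self.threshold is only assigned in the LPQ/fisher branches above;
        # find_face assumes it has been set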

        metric_param = None
        d = {
            "euclid": EuclideanDistance,
            "cosine": CosineDistance,
            "normal": NormalizedCorrelation,
            "chi": ChiSquareDistance,
            "histo": HistogramIntersection,
            "l1b": L1BinRatioDistance,
            "chibrd": ChiSquareBRD
        }
        if metric in d:
            metric_param = d[metric]()
        else:
            metric_param = ChiSquareDistance()

        classifier = NearestNeighbor(dist_metric=metric_param, k=k)
        feature = ChainOperator(TanTriggsPreprocessing(), feature)
        # feature = ChainOperator(TanTriggsPreprocessing(0.1, 10.0, 1.0, 3.0), feature)
        self.model = PredictableModel(feature, classifier)

        # images in one list, id's on another
        id_list, face_list = zip(*input_faces)

        print "Train the model"
        start = time.clock()
        # model.compute(X, y)
        self.model.compute(face_list, id_list)
        stop = time.clock()
        print "Training done in", stop - start, " next...find a face"

        # threshold_lpq_normalized = threshold_function(0.67, 0.3)
        # threshold_lpq_chisquared = threshold_function(71.4, 70)
        # threshold_spatial_cosine = threshold_function(0.908, 0.908)
        # threshold_spatial_chisuearbrd = threshold_function()
        # threshold = threshold_lpq_normalized

    def find_face(self, input_face_image):
        assert self.model, "Model is not valid"
        res = self.model.predict(input_face_image)
        print res
        return self.threshold(res)
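A hedged usage sketch for FaceDatabase; the folder path and probe image are illustrative, and the probe is assumed to be a face crop like those read by utils.read_images_from_single_folder:

db = FaceDatabase("/path/to/face_folder", feature_parameter="LPQ", metric="chi", k=3)
match_id = db.find_face(probe_image)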
Example #14
    # Define a 1-NN classifier with Euclidean Distance:
    classifier = NearestNeighbor(dist_metric=EuclideanDistance(), k=1)
    # Define the model as the combination
    model = PredictableModel(feature=feature, classifier=classifier)
    # Compute the Fisherfaces on the given data (in X) and labels (in y):
    model.compute(X, y)

    # Then turn the first (at most) 16 eigenvectors into grayscale
    # images (note: eigenvectors are stored by column!)
    # E = []
    # for i in xrange(min(model.feature.eigenvectors.shape[1], 16)):
    #     e = model.feature.eigenvectors[:, i].reshape(X[0].shape)
    #     E.append(minmax_normalize(e, 0, 255, dtype=np.uint8))

    img_path = 'rawand1.jpg'
    converted_img_path = "temp_%s" % img_path
    detect_face(img_path, outfile=converted_img_path)
    img = Image.open(converted_img_path)
    img = img.convert("L")

    p = model.predict(img)[0]
    label = keys[p]
    print label

    [X, y, keys] = read_images("../faces2/", keys=keys)
    model.classifier.update(X, y)

    p = model.predict(img)[0]
    label = keys[p]
    print label
Example #15
    rval, frame = vc.read()

    img = frame
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.2, 3)

    for (x, y, w, h) in faces:

        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)

        sampleImage = gray[y:y + h, x:x + w]
        sampleImage = cv2.resize(sampleImage, (256, 256))

        # figure out whose face this is
        [predicted_label, generic_classifier_output] = model.predict(sampleImage)
        print [predicted_label, generic_classifier_output]
        # threshold chosen at 700: a higher threshold means lower accuracy and vice versa
        if int(generic_classifier_output['distances'][0]) <= 700:
            cv2.putText(img, 'you are: ' + str(subject_dictionary[predicted_label]), (x, y), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 250), 3, 1)
    cv2.imshow('result', img)
    if cv2.waitKey(10) == 27:
        break

cv2.destroyAllWindows()
vc.release()
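The model and subject_dictionary used above are assumed to be prepared beforehand, for example as in the checkFace snippet of Example #3:

model = PredictableModel(Fisherfaces(), NearestNeighbor())
[X, y, subject_names] = read_images(path)
subject_dictionary = dict(zip(list(xrange(max(y) + 1)), subject_names))
model.compute(X, y)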

Example #16
File: face_rec2.py  Project: orvitinn/msc
    # threshold_lpq_normalized = threshold_function(0.67, 0.3)
    threshold_lpq_chisquared = threshold_function(70, 35)
    # threshold_spatial_cosine = threshold_function(0.908, 0.908)
    # threshold_spatial_chisuearbrd = threshold_function()

    # threshold = threshold_lpq_normalized
    threshold = threshold_lpq_chisquared
    # threshold = threshold_spatial_cosine

    for image, id in test_list:
        target_full_name = os.path.join(test_path, image)
        prufu_mynd = utils.read_image(target_full_name)
        # prufu_mynd = fp.process_image(utils.read_image(target_full_name))

        if prufu_mynd is not None:
            res = model.predict(prufu_mynd)
            found_id = threshold(res) # result_from_res(res)
            print found_id, ",", id
        else:
            print "Gat ekki opnað prufumynd"

    """
    p1 = fp.process_image(utils.read_image("/Users/matti/Documents/forritun/att_faces/arora_01.jpg"))
    p2 = utils.read_image("/Users/matti/Dropbox/Skjöl/Meistaraverkefni/server/test_faces_to_search_for/arora_01.png")
    res1 = model.predict(p1)
    res2 = model.predict(p2)
    print res1
    print res2

    """
    """
Example #18
mod3.compute(Xtrain, ytrain)
mod4.compute(Xtrain, ytrain)
mod5.compute(Xtrain, ytrain)
mod6.compute(Xtrain, ytrain)
mod7.compute(Xtrain, ytrain)
mod8.compute(Xtrain, ytrain)
mod9.compute(Xtrain, ytrain)
mod10.compute(Xtrain, ytrain)


# For Training Size 3

p = np.array(np.ones(len(Xtest)) * 9, dtype=int)
count = 0
for i in range(len(Xtest)):
    d10 = mod10.predict(Xtest[i])
    if d10[1]['distances'] < 0.33:
        count += 1
        p[i] = int(d10[0])
        # print 'mod10', (d10[1]['distances']), p[i], ytest[i]
        continue
    d9 = mod9.predict(Xtest[i])
    if d9[1]['distances'] < 40:
        count += 1
        p[i] = int(d9[0])
        # print 'mod9', abs(d9[1]['distances']), p[i], ytest[i]
        continue
    d6 = mod6.predict(Xtest[i])
    if abs(d6[1]['distances']) > 0.68:
        count += 1
        p[i] = int(d6[0])