Ejemplo n.º 1
0
def computeDescriptor(descriptor_type, database, image):
    """Detect a face in *image*, align and crop it, and compute its
    LBP-based descriptor using models trained for *database*.

    Parameters:
        descriptor_type: descriptor variant name; if it contains "jb" a
            Joint Bayesian transform is applied to the raw descriptor.
        database: database identifier used to locate the PCA/LDA/JB model
            files under ``config.models_path``.
        image: input image as a numpy array (OpenCV BGR convention).

    Returns:
        A 1-D numpy array holding the computed descriptor.

    Raises:
        Exception: if no face is detected in the image.
    """
    detector = FaceDetector()
    alignment = LBFLandmarkDetector(detector="opencv", landmarks=68)
    face_normalization = FaceNormalization()
    face_normalization.setReferenceShape(alignment.getReferenceShape())
    pca = Pca(filename=os.path.join(config.models_path, "PCA_%s.txt" %
                                    database))
    lda = Lda(os.path.join(config.models_path, "LDA_%s.txt" % database))
    jb = pickleLoad(os.path.join(config.models_path, "JB_%s.txt" % database))
    descriptor = LbpDescriptor(descriptor_type, pca=pca, lda=lda)

    face = detector.detectFaces(image)
    if len(face) == 0:
        raise Exception("No faces detected")
    # Only the first detected face is used.
    face = face[0]
    shape = alignment.detectLandmarks(image, face)

    # Debug visualization: face bounding box plus the 68 landmarks.
    copy = np.copy(image)
    cv2.rectangle(copy, face[:2], face[2:], (0, 0, 255), 1)
    for landmark in shape:
        # BUG FIX: ``np.int`` was deprecated in NumPy 1.20 and removed in
        # 1.24; the builtin ``int`` is the documented replacement.
        cv2.circle(copy, tuple(landmark.astype(int)), 1, (0, 255, 0), -1)
    cv2.imshow("Face detection and alignment", copy)

    # NOTE(review): the return value of normalize() is discarded here while
    # the sibling computeDescriptor() below assigns it -- confirm whether
    # normalize() modifies ``image`` in place.
    face_normalization.normalize(image, shape)
    # Crop to the region of interest used during training.
    image = image[49:201, 84:166]

    if "jb" in descriptor_type:
        # Joint Bayesian expects a 2-D batch; add an axis, flatten the result.
        desc = descriptor.compute(image, normalize=False)
        return jb.transform(desc[np.newaxis]).ravel()
    else:
        return descriptor.compute(image)
Ejemplo n.º 2
0
def computeDescriptors(data,
                       descriptor_type=ULBP_WPCA,
                       learned_models_files=None,
                       normalize=True):
    """Compute an LBP-based descriptor for every sample in *data*.

    Parameters:
        data: indexable collection of images; ``data.shape[0]`` gives the
            number of samples (numpy-array-like).
        descriptor_type: one of the known ``descriptor_types``; may contain
            "pca", "lda" and/or "jb" to enable the corresponding stages.
        learned_models_files: mapping from stage name ("pca", "lda", "jb")
            to the model file to load for that stage.  Defaults to an empty
            mapping.
        normalize: forwarded to ``LbpDescriptor.compute`` for each sample.

    Returns:
        2-D numpy array of shape (n_samples, n_features), optionally passed
        through the Joint Bayesian transform when "jb" is requested.

    Raises:
        Exception: if *descriptor_type* is not a known descriptor type.
    """
    # BUG FIX: the original used a mutable default argument ({}); use the
    # None sentinel idiom instead (behavior is unchanged -- the dict is
    # only ever read).
    if learned_models_files is None:
        learned_models_files = {}

    if descriptor_type not in descriptor_types:
        raise Exception("Descriptor type unknown")

    pca = None
    if "pca" in descriptor_type:
        pca = Pca(filename=learned_models_files["pca"])

    lda = None
    if "lda" in descriptor_type:
        lda = Lda(learned_models_files["lda"])

    descriptor = LbpDescriptor(descriptor_type, pca=pca, lda=lda)

    # Compute one sample first to learn the output size and dtype, then
    # fill a preallocated array.
    sample = descriptor.compute(data[0])
    n_samples = data.shape[0]
    n_features = sample.shape[0]
    descriptors = np.empty((n_samples, n_features), dtype=sample.dtype)
    for i in xrange(n_samples):
        descriptors[i] = descriptor.compute(data[i], normalize=normalize)

    if "jb" in descriptor_type:
        jb = pickleLoad(learned_models_files["jb"])
        return jb.transform(descriptors)
    else:
        return descriptors
Ejemplo n.º 3
0
 def __init__(self,
              cell_size=24,
              step=2,
              scales=3,
              embed_spatial_information=False,
              filename=None):
     """Initialize the descriptor parameters, or restore them from a file.

     Parameters:
         cell_size: side length of each LBP cell.
         step: step between cells.
         scales: number of scales to process.
         embed_spatial_information: whether spatial info is embedded.
         filename: if given, restore a previously pickled instance instead
             of using the other arguments.
     """
     if filename:
         # BUG FIX: the original did ``self = pickleLoad(filename)``, which
         # only rebinds the local name and leaves the instance unmodified.
         # Copy the loaded object's state onto this instance instead.
         # (Assumes the pickled object is an instance of this class with a
         # regular __dict__ -- TODO confirm.)
         loaded = pickleLoad(filename)
         self.__dict__.update(loaded.__dict__)
     else:
         self.cell_size = cell_size
         self.step = step
         self.scales = scales
         self.embed_spatial_information = embed_spatial_information
Ejemplo n.º 4
0
    face_normalization.setReferenceShape(reference_shape)

    pca = Pca(filename=os.path.join(config.models_path, "PCA_%s.txt" %
                                    database_name))
    lda = Lda(os.path.join(config.models_path, "LDA_%s.txt" % database_name))
    descriptor = LbpDescriptor(descriptor_type, pca=pca, lda=lda)

    return face_normalization, descriptor


def computeDescriptor(image, (face_normalization, descriptor)):
    # Compute the descriptor of a single face image using a previously
    # built (face_normalization, descriptor) pair.
    # NOTE: the tuple-unpacking parameter is Python 2-only syntax (removed
    # in Python 3 by PEP 3113).
    # NOTE(review): ``shape``, ``descriptor_type`` and ``database_name``
    # are free variables here -- presumably module-level globals bound by
    # the __main__ section; verify, otherwise this raises NameError.
    normalized_image = face_normalization.normalize(image, shape)
    # Crop to the region of interest used during training.
    normalized_image = normalized_image[49:201, 84:166]

    if "jb" in descriptor_type:
        # Joint Bayesian model is loaded lazily, only when requested.
        jb = pickleLoad(
            os.path.join(config.models_path, "JB_%s.txt" % database_name))
        # JB expects a 2-D batch; add an axis then flatten the result.
        desc = descriptor.compute(normalized_image, normalize=False)
        return jb.transform(desc[np.newaxis]).ravel()
    else:
        return descriptor.compute(normalized_image)


if __name__ == "__main__":

    parser = argparse.ArgumentParser(description="")
    parser.add_argument("input_video_file", help="video to process")
    parser.add_argument("-m",
                        dest="descriptor",
                        default="ulbp_pca_lda",
                        help="database")
    parser.add_argument("-d",
Ejemplo n.º 5
0
        default='fisher_vector.pkl',
        help='previously learnt (or partially learnt) fisher vector models')
    parser.add_argument('-o',
                        dest='output_file',
                        default='fisher_vector.pkl',
                        help='where to write computations results')
    parser.add_argument('-j',
                        dest='n_threads',
                        type=int,
                        default=1,
                        help='number of threads to use')
    args = parser.parse_args()

    base_path = "/rex/store1/home/tlorieul/"
    training_set = loadDevData(filename=(base_path + 'lfw/peopleDevTrain.txt'),
                               mapping_filename=(base_path +
                                                 'lfw/mapping.txt'))
    data = np.load(base_path + 'lfw/lfwa.npy')
    training_data = data[training_set]

    if args.command == 'pca_learning':
        fisher_vectors = FisherVectors(scales=5)
        fisher_vectors.computePcaOnLocalDescriptors(training_data,
                                                    n_pca_components=20)
        pickleSave(args.output_file, fisher_vectors)

    elif args.command == 'gmm_learning':
        fisher_vectors = pickleLoad(args.input_file)
        fisher_vectors.computeGMM(training_data, n_threads=args.n_threads)
        pickleSave(args.output_file, fisher_vectors)