def test_0(self):
    """Smoke test: training Eigenfaces and computing a feature should not raise."""
    # Train on the first 100 unique LFW training images.
    paths = lfwcrop_data.get_unique_lfw_training_images('data')[:100]
    images = [cv.LoadImage(p) for p in paths]
    vectors = range(3, 64)
    feat = face_feature.Eigenfaces(images, vectors)
    out = imfeat.compute(feat, images[0])
    # The mean vector and the eigenvector matrix must agree on input dimensionality.
    self.assertEqual(len(feat.mean), feat.vectors.shape[0])
    self.assertEqual(1, len(out))
    # 61 == len(range(3, 64)): one output coefficient per requested eigenvector.
    self.assertEqual(61, feat.vectors.shape[1])
    self.assertEqual(61, len(out[0]))
def test_0(self):
    """Smoke test: verify the Eigenfaces pipeline runs end to end without errors."""
    training = lfwcrop_data.get_unique_lfw_training_images('data')
    images = [cv.LoadImage(fn) for fn in training[:100]]
    vectors = range(3, 64)
    feat = face_feature.Eigenfaces(images, vectors)
    out = imfeat.compute(feat, images[0])
    # Sanity checks on the learned basis and the computed feature:
    # rows of the eigenvector matrix match the mean's dimensionality,
    # and exactly len(range(3, 64)) == 61 components come back.
    self.assertEqual(len(feat.mean), feat.vectors.shape[0])
    self.assertEqual(1, len(out))
    self.assertEqual(61, feat.vectors.shape[1])
    self.assertEqual(61, len(out[0]))
def test_identity(self):
    """Check that features from a cv-loaded and a PIL-loaded copy of the same
    image agree (for now, mostly a does-it-run test).
    """
    # Train on a random sample of 100 unique LFW images.
    images = [cv.LoadImage(x) for x in random.sample(
        lfwcrop_data.get_unique_lfw_training_images('data'), 100)]
    test_image = 'data/exemplar1.jpg'
    im1 = cv.LoadImage(test_image)
    # Fix: the previous code did pil_to_cv(open(test_image)) — that leaked the
    # file handle and opened binary image data in text mode (which corrupts
    # bytes on platforms that translate newlines). Use 'rb' and close it.
    with open(test_image, 'rb') as fp:
        im2 = pil_to_cv(fp)
    feat = face_feature.Eigenfaces(images)
    out1 = imfeat.compute(feat, resize(im1, cv.GetSize(images[0])))[0]
    out2 = imfeat.compute(feat, resize(im2, cv.GetSize(images[0])))[0]
    # Report the per-component feature and pixel discrepancies for debugging.
    print('||feat(cv) - feat(pil)|| = %g' % (
        np.linalg.norm(out1 - out2) / len(out1)))
    print('||cv - pil|| = %g' % (np.linalg.norm(
        cv_to_array(im1) - cv_to_array(im2)) / len(cv_to_array(im1))))
    np.testing.assert_almost_equal(out1, out2)
if not cv.GetSize(im) == size: im_resized = cv.CreateImage(size, im.depth, im.nChannels) cv.Resize(im, im_resized, cv.INTER_LINEAR) im = im_resized return im def train(training_fns, pickle_fn, max_train_ims=3000, size=(64, 64)): # load the unique training images, and learn PCA print('Training Eigenfaces feature space (%i training images)...' % ( len(training_fns))) if len(training_fns) <= max_train_ims: train_ims = [resize_im(cv.LoadImage(fn, 1), size) for fn in training_fns] else: train_ims = [resize_im(cv.LoadImage(fn, 1), size) for fn in random.sample(training_fns, max_train_ims)] feat = face_feature.Eigenfaces(train_ims) with open(pickle_fn, 'w') as fp: cPickle.dump(feat, fp) if __name__ == '__main__': if(len(sys.argv) > 1 and sys.argv[1] == 'flickr'): pickle_fn = 'data/eigenfaces_flickr.pkl' training_fns = get_flickr_training_images() else: import lfwcrop_data pickle_fn = 'data/eigenfaces_lfw_cropped.pkl' training_fns = lfwcrop_data.get_unique_lfw_training_images('data') train(training_fns, pickle_fn)
return im def train(training_fns, pickle_fn, max_train_ims=3000, size=(64, 64)): # load the unique training images, and learn PCA print('Training Eigenfaces feature space (%i training images)...' % (len(training_fns))) if len(training_fns) <= max_train_ims: train_ims = [ resize_im(cv.LoadImage(fn, 1), size) for fn in training_fns ] else: train_ims = [ resize_im(cv.LoadImage(fn, 1), size) for fn in random.sample(training_fns, max_train_ims) ] feat = face_feature.Eigenfaces(train_ims) with open(pickle_fn, 'w') as fp: cPickle.dump(feat, fp) if __name__ == '__main__': if (len(sys.argv) > 1 and sys.argv[1] == 'flickr'): pickle_fn = 'data/eigenfaces_flickr.pkl' training_fns = get_flickr_training_images() else: import lfwcrop_data pickle_fn = 'data/eigenfaces_lfw_cropped.pkl' training_fns = lfwcrop_data.get_unique_lfw_training_images('data') train(training_fns, pickle_fn)