def detect(self, im):
    '''
    @returns: a list of tuples where each tuple contains
        (registered_image, detection_rect, left_eye, right_eye)
    '''
    result = []

    rects = self.face_detector.detect(im)

    # Process each face detection.
    for rect in rects:
        # Transform the face into the standard tile.
        affine = pv.AffineFromRect(rect, self.tile_size)
        cropped = affine.transformImage(im)

        for _ in range(self.n_iter):
            cropped = pv.meanStd(cropped)

            # Find the eyes.
            data = cropped.asMatrix2D().flatten()
            data = np.array(data, 'd').flatten()
            data = self.normalize.normalizeVector(data)

            pleye = self.left_locator.predict(data)
            preye = self.right_locator.predict(data)

            pleye = affine.invertPoint(pleye)
            preye = affine.invertPoint(preye)

            # Second pass: re-register the tile using the predicted eye points.
            affine = pv.AffineFromPoints(pleye, preye, self.left_eye, self.right_eye, self.tile_size)
            cropped = affine.transformImage(im)

        reg = cropped

        # Validate the face.
        if self.validate is not None and not self.validate(reg):
            if self.annotate:
                im.annotateRect(rect, color='red')
                im.annotatePoint(pleye, color='red')
                im.annotatePoint(preye, color='red')
            continue

        if self.annotate:
            reg.annotatePoint(self.left_eye, color='green')
            reg.annotatePoint(self.right_eye, color='green')
            im.annotatePoint(pleye, color='green')
            im.annotatePoint(preye, color='green')
            im.annotateRect(rect, color='green')

        result.append((reg, rect, pleye, preye))

    return result
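# A minimal usage sketch for the detect method above.  The name EyeDetector
# and its no-argument constructor are hypothetical stand-ins for whatever
# class hosts this method; only pv.Image and the tuple layout documented in
# the docstring are taken from the source.
import pyvision as pv

detector = EyeDetector()   # hypothetical host class
im = pv.Image("face.jpg")  # example input path
for reg, rect, leye, reye in detector.detect(im):
    # reg is the eye-registered face tile, ready for recognition.
    print("Face at", rect, "with eyes at", leye, "and", reye)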
def setUp(self):
    SCRAPS_FACE_DATA = os.path.join(pv.__path__[0], "data", "csuScrapShots")
    self.test_images = []
    self.eyes = EyesFile(os.path.join(SCRAPS_FACE_DATA, "coords.txt"))
    for filename in self.eyes.files()[0:10]:
        im = pv.Image(os.path.join(SCRAPS_FACE_DATA, filename + ".pgm"))
        eyes = self.eyes.getEyes(filename)
        # Register each face to a 128x128 tile with the eyes at fixed points.
        affine = pv.AffineFromPoints(eyes[0][0], eyes[0][1], pv.Point(40, 40), pv.Point(88, 40), (128, 128))
        im = affine.transformImage(im)
        self.test_images.append(im)
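# A sketch of a test method that could consume this fixture.  The method name
# and assertion are illustrative, not part of the original suite; it assumes
# pv.Image exposes a size attribute of (width, height).
def test_images_registered(self):
    # setUp registered every face to a 128x128 tile with the eyes mapped to
    # (40,40) and (88,40), so each cached image should have that tile size.
    for im in self.test_images:
        self.assertEqual(im.size, (128, 128))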
def detect(self, im):
    '''
    @returns: a list of tuples where each tuple contains
        (registered_image, detection_rect, left_eye, right_eye)
    '''
    result = []

    rects = self.face_detector.detect(im)

    # Process each face detection.
    for rect in rects:
        # Transform the face using separate tiles for the left and right eyes.
        laffine, raffine = self.generateTransforms(rect)
        lcropped = laffine.transformImage(im)
        rcropped = raffine.transformImage(im)

        # Normalize the images.
        lcropped = pv.meanStd(lcropped)
        rcropped = pv.meanStd(rcropped)

        pleye = self.left_locator.predict(lcropped)
        preye = self.right_locator.predict(rcropped)

        pleye = laffine.invertPoint(pleye)
        preye = raffine.invertPoint(preye)

        affine = pv.AffineFromPoints(pleye, preye, self.left_eye, self.right_eye, self.tile_size)
        reg = affine.transformImage(im)

        # Validate the face.
        if self.validate is not None and not self.validate(reg):
            if self.annotate:
                im.annotateRect(rect, color='red')
                im.annotatePoint(pleye, color='red')
                im.annotatePoint(preye, color='red')
            continue

        if self.annotate:
            reg.annotatePoint(self.left_eye, color='green')
            reg.annotatePoint(self.right_eye, color='green')
            im.annotatePoint(pleye, color='green')
            im.annotatePoint(preye, color='green')
            im.annotateRect(rect, color='green')

        result.append((reg, rect, pleye, preye))

    return result
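# generateTransforms is called above but not shown in this excerpt.  A
# plausible sketch, assuming the intent is one crop around each expected eye
# region of the detection rectangle; the half-width split used here is an
# illustrative guess, not the library's actual geometry.
def generateTransforms(self, rect):
    # Crop a sub-rectangle around each eye region and map it to the tile.
    lrect = pv.Rect(rect.x, rect.y, 0.5 * rect.w, 0.5 * rect.h)               # left-eye region
    rrect = pv.Rect(rect.x + 0.5 * rect.w, rect.y, 0.5 * rect.w, 0.5 * rect.h)  # right-eye region
    laffine = pv.AffineFromRect(lrect, self.tile_size)
    raffine = pv.AffineFromRect(rrect, self.tile_size)
    return laffine, raffine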
def preprocess(self, im, leye, reye, ilog=None):
    im = pv.Image(im.asPIL())

    # Register the face so the eyes land at the configured tile points.
    affine = pv.AffineFromPoints(leye, reye, self.leye, self.reye, self.tile_size)
    tile = affine.transformImage(im)

    mat = tile.asMatrix2D()

    # High pass filter the image by subtracting a Gaussian-blurred copy.
    mat = mat - ndimage.gaussian_filter(mat, self.norm_sigma)

    # Value normalize the image to zero mean and unit standard deviation.
    mat = mat - mat.mean()
    mat = mat / mat.std()

    tile = pv.Image(mat)

    return tile
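# A minimal usage sketch for preprocess.  The FacePreprocessor name and its
# constructor are hypothetical stand-ins for the host class, which is assumed
# to be configured with leye/reye target points, tile_size, and norm_sigma;
# the eye coordinates below are example values.
normalizer = FacePreprocessor()                       # hypothetical host class
im = pv.Image("face.jpg")                             # example input path
leye, reye = pv.Point(230, 180), pv.Point(310, 185)   # example eye points
tile = normalizer.preprocess(im, leye, reye)
# The resulting tile is high-pass filtered and zero-mean/unit-variance,
# which suppresses low-frequency illumination before feature extraction.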
def reduce_exp4(source_dir, dest_dir):
    '''Crop each FRGC Experiment 4 image to a reduced eye-aligned tile and copy the sigsets and metadata to dest_dir.'''
    print("Creating directories.")
    try:
        os.makedirs(os.path.join(dest_dir, 'recordings'))
    except OSError:
        pass
    try:
        os.makedirs(os.path.join(dest_dir, 'sigsets'))
    except OSError:
        pass

    print("Loading FRGC Information.")
    frgc = FRGC_Exp4(source_dir)

    print("Processing Images.")
    keys = list(frgc.keys())
    for i in range(len(keys)):
        key = keys[i]
        face = frgc[key]
        print("Processing %d of %d:" % (i + 1, len(keys)), key, face.person_id)

        # Align the eyes to the reduced tile geometry and save as JPEG.
        affine = pv.AffineFromPoints(face.left_eye, face.right_eye, REDUCED_LEYE, REDUCED_REYE, REDUCED_SIZE)
        tile = affine.transformImage(face.image)
        tile.asPIL().save(os.path.join(dest_dir, 'recordings', key + ".jpg"), quality=95)

    print("Copying sigsets.")
    shutil.copy(frgc.orig_sigset_path, os.path.join(dest_dir, 'sigsets'))
    shutil.copy(frgc.query_sigset_path, os.path.join(dest_dir, 'sigsets'))
    shutil.copy(frgc.target_sigset_path, os.path.join(dest_dir, 'sigsets'))
    shutil.copy(frgc.training_sigset_path, os.path.join(dest_dir, 'sigsets'))

    print("Copying metadata.")
    shutil.copy(frgc.metadata_path, os.path.join(dest_dir, 'sigsets'))
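# Expected invocation, assuming source_dir holds the original FRGC data and
# REDUCED_LEYE, REDUCED_REYE, and REDUCED_SIZE are module-level constants.
# The paths below are examples only.
if __name__ == '__main__':
    reduce_exp4('/data/FRGC-2.0', '/data/FRGC-Exp4-Reduced')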
def genderClassifier(clsfy, ilog=None):
    '''
    genderClassifier takes a classifier as an argument and will use the
    csuScrapShots data to perform a gender classification test on that
    classifier.

    These three functions will be called::

        for im in training_images:
            clsfy.addTraining(label, im, ilog=ilog)

        clsfy.train(ilog=ilog)

        for im in testing_images:
            clsfy.predict(im, ilog=ilog)

    label is 0 or 1 (0=Female, 1=Male).  im is a 64x64 pyvision image that
    is normalized to crop the face.  The output of predict should be a
    class label (0 or 1).

    @returns: the success rate for the testing set.
    '''
    filename = os.path.join(pv.__path__[0], 'data', 'csuScrapShots', 'gender.txt')
    f = open(filename, 'r')
    image_cache = []
    examples = []
    for line in f:
        im_name, class_name = line.split()
        if class_name == 'F':
            class_name = 0
        else:
            class_name = 1
        long_name = os.path.join(pv.__path__[0], 'data', 'csuScrapShots', im_name)
        leye, reye = SCRAPS_EYES.getEyes(im_name)[0]
        im = pv.Image(long_name)
        image_cache.append(im)
        # Register the face to a 64x64 tile with the eyes at fixed points.
        im = pv.AffineFromPoints(leye, reye, pv.Point(22, 27), pv.Point(42, 27), (64, 64)).transformImage(im)
        examples.append([class_name, im, im_name])

    training = examples[:103]
    testing = examples[103:]

    for each in training:
        clsfy.addTraining(each[0], each[1], ilog=ilog)

    clsfy.train(ilog=ilog)

    table = pv.Table()
    correct = 0
    total = 0
    for each in testing:
        label = clsfy.predict(each[1], ilog=ilog)
        total += 1
        if label == each[0]:
            correct += 1
    rate = float(correct) / total

    if ilog:
        ilog.table(table)

    return rate
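# A minimal classifier sketch satisfying the interface the docstring above
# describes (addTraining, train, predict).  This toy majority-class baseline
# ignores the pixel data and is illustrative only; a real classifier would
# learn from the 64x64 face tiles.
class MajorityClassifier:
    def __init__(self):
        self.counts = {0: 0, 1: 0}
        self.majority = 0

    def addTraining(self, label, im, ilog=None):
        # Tally how often each class label appears in the training set.
        self.counts[label] += 1

    def train(self, ilog=None):
        # Remember the most common training label.
        self.majority = max(self.counts, key=self.counts.get)

    def predict(self, im, ilog=None):
        # Always predict the majority class.
        return self.majority

rate = genderClassifier(MajorityClassifier())
print("Success rate:", rate)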
# Print the list of rectangles.
print("Face Detection Output:", rects)

# Also call the eye locator like a function with the original image and
# the list of face detections to locate the eyes.
eyes = el(im, rects)

# Print the list of eyes.  Format: [ [ face_rect, left_eye, right_eye ], ... ]
print("Eye Locator Output:", eyes)

# Now you can process the detection and eye data for each face detected in
# the image.  Here we annotate the image with the face detection box and
# eye coordinates, and we create a normalized face image by translating,
# rotating, and scaling the face using pv.AffineFromPoints.
for face_rect, left_eye, right_eye in eyes:
    # Annotate the original image.
    im.annotateRect(face_rect, color='red')
    im.annotatePoint(left_eye, color='yellow')
    im.annotatePoint(right_eye, color='yellow')

    # Align the eye coordinates to produce a face tile.  This is a typical
    # step before running a face verification algorithm.
    affine = pv.AffineFromPoints(left_eye, right_eye, pv.Point(32.0, 64.0), pv.Point(96.0, 64.0), (128, 160))
    tile = affine.transformImage(im)
    ilog(tile, "NormalizedFace")

# Finally, display the annotated image.
ilog(im, "DetectionData")
ilog.show()
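# For context, the names used in the excerpt above (im, rects, el, ilog)
# would come from a setup block like this sketch.  It assumes pyvision's
# CascadeDetector and FilterEyeLocator classes; treat the exact import paths
# and the input filename as assumptions.
import pyvision as pv
from pyvision.face.CascadeDetector import CascadeDetector
from pyvision.face.FilterEyeLocator import FilterEyeLocator

face_detect = CascadeDetector()    # Haar-cascade face detector
el = FilterEyeLocator()            # correlation-filter eye locator
ilog = pv.ImageLog()               # logs images to disk for review
im = pv.Image("group_photo.jpg")   # example input path
rects = face_detect(im)            # list of face detection rectangles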
def cropFace(self, im, eyes):
    left, right = eyes
    # Register the face so the eyes land at the configured tile points.
    affine = pv.AffineFromPoints(left, right, self.left_eye, self.right_eye, self.face_size)
    im = affine.transformImage(im)
    return im
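# A minimal usage sketch for cropFace.  The FaceCropper name and its
# constructor are hypothetical stand-ins for the host class, which is assumed
# to be configured with left_eye/right_eye target points and a face_size;
# the eye coordinates below are example values.
cropper = FaceCropper()                          # hypothetical host class
im = pv.Image("face.jpg")                        # example input path
eyes = (pv.Point(120, 140), pv.Point(180, 138))  # example (left, right) eye points
face_tile = cropper.cropFace(im, eyes)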