def addTraining(self, left_eye, right_eye, im):
    '''Train an eye detector given a full image and the eye coordinates.'''
    # Ground-truth face rectangle implied by the eye locations.
    true_rect = face_from_eyes(left_eye, right_eye)

    # Run the face detector and train from the first overlapping detection.
    for pred_rect in self.face_detector.detect(im):
        if not is_success(pred_rect, true_rect):
            continue

        # Build one tile per eye and crop the image into each.
        laffine, raffine = self.generateTransforms(pred_rect)
        left_tile = laffine.transformImage(im)
        right_tile = raffine.transformImage(im)

        # Normalize each tile to zero mean / unit standard deviation.
        left_tile = pv.meanStd(left_tile)
        right_tile = pv.meanStd(right_tile)

        # Map the ground-truth eye points into tile coordinates and
        # hand the labeled samples to the per-eye locators.
        self.left_locator.addTraining(left_tile, laffine.transformPoint(left_eye))
        self.right_locator.addTraining(right_tile, raffine.transformPoint(right_eye))

        # Only the first successful detection is used.
        return

    # No detection overlapped the true face.
    self.detection_failures += 1
def addTraining(self, left_eye, right_eye, im):
    '''Train an eye detector given a full image and the eye coordinates.'''
    # The face rect is derived from the eye coordinates.
    true_rect = face_from_eyes(left_eye, right_eye)

    # Detect candidate faces in the image.
    detections = self.face_detector.detect(im)

    # Search for a detection that matches the true face.
    for detection in detections:
        if is_success(detection, true_rect):
            left_xform, right_xform = self.generateTransforms(detection)

            # Crop one tile per eye, then normalize to zero mean / unit std.
            lcrop = pv.meanStd(left_xform.transformImage(im))
            rcrop = pv.meanStd(right_xform.transformImage(im))

            # Eye points expressed in tile coordinates.
            lpoint = left_xform.transformPoint(left_eye)
            rpoint = right_xform.transformPoint(right_eye)

            # Feed the labeled tiles to the per-eye locators.
            self.left_locator.addTraining(lcrop, lpoint)
            self.right_locator.addTraining(rcrop, rpoint)

            # Train on the first match only.
            return

    # The face was not detected.
    self.detection_failures += 1
def detect(self, im):
    '''
    Detect faces and locate the eyes within each detection.

    @param im: the full image to search.
    @returns: a list of tuples where each tuple contains
        (registered_image, detection_rect, left_eye, right_eye)
    '''
    result = []

    rects = self.face_detector.detect(im)

    # Annotate faces
    for rect in rects:
        # Transform the face into one tile per eye.
        laffine, raffine = self.generateTransforms(rect)
        lcropped = laffine.transformImage(im)
        rcropped = raffine.transformImage(im)

        # Normalize the tiles to zero mean / unit standard deviation.
        lcropped = pv.meanStd(lcropped)
        rcropped = pv.meanStd(rcropped)

        # Predict eye locations in tile coordinates, then map back into
        # full-image coordinates.
        pleye = self.left_locator.predict(lcropped)
        preye = self.right_locator.predict(rcropped)
        pleye = laffine.invertPoint(pleye)
        preye = raffine.invertPoint(preye)

        # Register the face so the eyes land on the canonical positions.
        affine = pv.AffineFromPoints(pleye, preye, self.left_eye, self.right_eye, self.tile_size)
        reg = affine.transformImage(im)

        # Validate the face.
        # FIX: compare to None with identity ('is not None'), not '!='.
        if self.validate is not None and not self.validate(reg):
            if self.annotate:
                # Mark rejected detections in red.
                im.annotateRect(rect, color='red')
                im.annotatePoint(pleye, color='red')
                im.annotatePoint(preye, color='red')
            continue

        if self.annotate:
            # Mark accepted detections in green.
            reg.annotatePoint(self.left_eye, color='green')
            reg.annotatePoint(self.right_eye, color='green')
            im.annotatePoint(pleye, color='green')
            im.annotatePoint(preye, color='green')
            im.annotateRect(rect, color='green')

        result.append((reg, rect, pleye, preye))

    return result
def detect(self, im):
    '''
    Detect faces and locate the eyes within each detection.

    @param im: the full image to search.
    @returns: a list of tuples where each tuple contains
        (registered_image, detection_rect, left_eye, right_eye)
    '''
    result = []

    # Annotate faces
    for rect in self.face_detector.detect(im):
        # One affine/tile per eye.
        laffine, raffine = self.generateTransforms(rect)

        # Crop and normalize (zero mean / unit std) each eye tile.
        lcropped = pv.meanStd(laffine.transformImage(im))
        rcropped = pv.meanStd(raffine.transformImage(im))

        # Locate each eye in its tile, then map back to image coordinates.
        pleye = laffine.invertPoint(self.left_locator.predict(lcropped))
        preye = raffine.invertPoint(self.right_locator.predict(rcropped))

        # Register the face to the canonical eye positions.
        affine = pv.AffineFromPoints(pleye, preye, self.left_eye, self.right_eye, self.tile_size)
        reg = affine.transformImage(im)

        # Validate the face.
        # FIX: 'is not None' identity test instead of '!= None'.
        if self.validate is not None and not self.validate(reg):
            if self.annotate:
                # Rejected detections are marked in red.
                im.annotateRect(rect, color='red')
                im.annotatePoint(pleye, color='red')
                im.annotatePoint(preye, color='red')
            continue

        if self.annotate:
            # Accepted detections are marked in green.
            reg.annotatePoint(self.left_eye, color='green')
            reg.annotatePoint(self.right_eye, color='green')
            im.annotatePoint(pleye, color='green')
            im.annotatePoint(preye, color='green')
            im.annotateRect(rect, color='green')

        result.append((reg, rect, pleye, preye))

    return result
def detect(self, im):
    '''
    Detect faces, then iteratively refine the eye locations.

    @param im: the full image to search.
    @returns: a list of tuples where each tuple contains
        (registered_image, detection_rect, left_eye, right_eye)
    '''
    result = []

    rects = self.face_detector.detect(im)

    # Annotate faces
    for rect in rects:
        # Initial crop straight from the detection rectangle.
        affine = pv.AffineFromRect(rect, self.tile_size)
        cropped = affine.transformImage(im)

        # Refine the eye estimate n_iter times: locate, re-register, repeat.
        for _ in range(self.n_iter):
            cropped = pv.meanStd(cropped)

            # Find the eyes: flatten the tile and normalize the vector.
            # NOTE(review): the second .flatten() looks redundant but is kept
            # in case asMatrix2D() yields a 2-D matrix type — confirm before
            # removing.
            data = cropped.asMatrix2D().flatten()
            data = np.array(data, 'd').flatten()
            data = self.normalize.normalizeVector(data)

            pleye = self.left_locator.predict(data)
            preye = self.right_locator.predict(data)

            # Map the tile-space predictions back to image coordinates.
            pleye = affine.invertPoint(pleye)
            preye = affine.invertPoint(preye)

            # Second pass: re-register the face on the predicted eyes.
            affine = pv.AffineFromPoints(pleye, preye, self.left_eye, self.right_eye, self.tile_size)
            cropped = affine.transformImage(im)

        reg = cropped

        # Validate the face.
        # FIX: compare to None with identity ('is not None'), not '!='.
        if self.validate is not None and not self.validate(reg):
            if self.annotate:
                # Rejected detections are marked in red.
                im.annotateRect(rect, color='red')
                im.annotatePoint(pleye, color='red')
                im.annotatePoint(preye, color='red')
            continue

        if self.annotate:
            # Accepted detections are marked in green.
            reg.annotatePoint(self.left_eye, color='green')
            reg.annotatePoint(self.right_eye, color='green')
            im.annotatePoint(pleye, color='green')
            im.annotatePoint(preye, color='green')
            im.annotateRect(rect, color='green')

        result.append((reg, rect, pleye, preye))

    return result
def detect(self, im):
    '''
    Detect faces, then iteratively refine the eye locations.

    @param im: the full image to search.
    @returns: a list of tuples where each tuple contains
        (registered_image, detection_rect, left_eye, right_eye)
    '''
    result = []

    # Annotate faces
    for rect in self.face_detector.detect(im):
        # Start from a crop of the raw detection rectangle.
        affine = pv.AffineFromRect(rect, self.tile_size)
        cropped = affine.transformImage(im)

        # Iterate: predict the eyes, re-register the crop on them.
        for _ in range(self.n_iter):
            cropped = pv.meanStd(cropped)

            # Flatten the tile into a normalized feature vector.
            # NOTE(review): the second .flatten() may be redundant; kept in
            # case asMatrix2D() yields a 2-D matrix type — confirm.
            data = cropped.asMatrix2D().flatten()
            data = np.array(data, 'd').flatten()
            data = self.normalize.normalizeVector(data)

            # Predict both eyes and map back to image coordinates.
            pleye = affine.invertPoint(self.left_locator.predict(data))
            preye = affine.invertPoint(self.right_locator.predict(data))

            # Second pass: register on the predicted eye points.
            affine = pv.AffineFromPoints(pleye, preye, self.left_eye, self.right_eye, self.tile_size)
            cropped = affine.transformImage(im)

        reg = cropped

        # Validate the face.
        # FIX: 'is not None' identity test instead of '!= None'.
        if self.validate is not None and not self.validate(reg):
            if self.annotate:
                # Rejected detections in red.
                im.annotateRect(rect, color='red')
                im.annotatePoint(pleye, color='red')
                im.annotatePoint(preye, color='red')
            continue

        if self.annotate:
            # Accepted detections in green.
            reg.annotatePoint(self.left_eye, color='green')
            reg.annotatePoint(self.right_eye, color='green')
            im.annotatePoint(pleye, color='green')
            im.annotatePoint(preye, color='green')
            im.annotateRect(rect, color='green')

        result.append((reg, rect, pleye, preye))

    return result
def addTraining(self, left_eye, right_eye, im):
    '''Train an eye detector given a full image and the eye coordinates.'''
    # Ground-truth face rectangle implied by the eye locations.
    true_rect = face_from_eyes(left_eye, right_eye)

    # Run the face detector; train from the first overlapping detection.
    for pred_rect in self.face_detector.detect(im):
        if not is_success(pred_rect, true_rect):
            continue

        # Crop the detected face to the training tile.
        affine = pv.AffineFromRect(pred_rect, self.tile_size)
        w, h = self.tile_size

        if self.perturbations:
            # Randomly rotate, translate and scale the tile about its
            # center (center -> perturb -> un-center, composed right-to-left).
            center = pv.AffineTranslate(-0.5 * w, -0.5 * h, self.tile_size)
            rotate = pv.AffineRotate(random.uniform(-pi / 8, pi / 8), self.tile_size)
            scale = pv.AffineScale(random.uniform(0.9, 1.1), self.tile_size)
            translate = pv.AffineTranslate(random.uniform(-0.05 * w, 0.05 * w),
                                           random.uniform(-0.05 * h, 0.05 * h),
                                           self.tile_size)
            inv_center = pv.AffineTranslate(0.5 * w, 0.5 * h, self.tile_size)
            affine = inv_center * translate * scale * rotate * center * affine

        # Crop and normalize (zero mean / unit std) the tile.
        tile = pv.meanStd(affine.transformImage(im))

        # Eye points expressed in tile coordinates.
        leye = affine.transformPoint(left_eye)
        reye = affine.transformPoint(right_eye)

        # Record the labels and feed the tile to the normalizer.
        self.training_labels.append((leye, reye))
        self.normalize.addTraining(0.0, tile)

        # Only the first successful detection is used.
        return

    # The face was not detected.
    self.detection_failures += 1
def computeVector(self, img):
    '''Creates a vector from a face'''
    # Flatten the face image into a 1-D vector.
    vec = img.asMatrix2D().flatten()

    # Apply the configured normalization. The table preserves the
    # original's sequential checks (each mode tested in turn).
    for mode, normalizer in ((PCA_MEAN_STD_NORM, pv.meanStd),
                             (PCA_MEAN_UNIT_NORM, pv.meanUnit),
                             (PCA_UNIT_NORM, pv.unit)):
        if self.norm == mode:
            vec = normalizer(vec)

    return vec
def computeVector(self, img):
    '''Creates a vector from a face'''
    # Turn the 2-D face image into a flat feature vector.
    face_vec = img.asMatrix2D().flatten()

    # Normalize according to the configured mode; each mode is tested
    # in turn, mirroring the original sequential checks.
    if self.norm == PCA_MEAN_STD_NORM:
        # Zero mean, unit standard deviation.
        face_vec = pv.meanStd(face_vec)
    if self.norm == PCA_MEAN_UNIT_NORM:
        # Zero mean, unit length.
        face_vec = pv.meanUnit(face_vec)
    if self.norm == PCA_UNIT_NORM:
        # Unit length only.
        face_vec = pv.unit(face_vec)

    return face_vec
def test_1_meanStd(self):
    '''meanStd Normalization: norm.mean() = 0.0 and norm.std() = 1.0....'''
    # Pick up an optional image log injected into module globals by a
    # test driver. FIX: avoid materializing globals().keys() as a list
    # and compare to None with identity ('is not None'), not '!='.
    ilog = globals().get('ilog')

    norm = pv.meanStd(self.tile)

    if ilog is not None:
        ilog.log(norm, label="meanStd_Normalization")

    # The normalized tile must have zero mean and unit standard deviation.
    mat = norm.asMatrix2D()
    self.assertAlmostEqual(mat.mean(), 0.0, places=3)
    self.assertAlmostEqual(mat.std(), 1.0, places=3)
def test_1_meanStd(self):
    '''meanStd Normalization: norm.mean() = 0.0 and norm.std() = 1.0....'''
    # An optional image log may be injected into module globals by a
    # test driver. FIX: use 'is not None' (identity) instead of '!= None'
    # and test membership on globals() directly.
    ilog = None
    if 'ilog' in globals():
        ilog = globals()['ilog']

    norm = pv.meanStd(self.tile)

    if ilog is not None:
        ilog.log(norm, label="meanStd_Normalization")

    # After normalization the tile has zero mean and unit std.
    mat = norm.asMatrix2D()
    self.assertAlmostEqual(mat.mean(), 0.0, places=3)
    self.assertAlmostEqual(mat.std(), 1.0, places=3)
def addTraining(self, left_eye, right_eye, im):
    '''Train an eye detector given a full image and the eye coordinates.'''
    # Determine the face rect from the ground-truth eye points.
    true_rect = face_from_eyes(left_eye, right_eye)

    # Run the face detector.
    rects = self.face_detector.detect(im)

    # Find the best detection if there is one.
    for pred_rect in rects:
        if is_success(pred_rect, true_rect):
            # Transform the face into the training tile.
            affine = pv.AffineFromRect(pred_rect, self.tile_size)
            w, h = self.tile_size

            if self.perturbations:
                # Random rotation, translation and scale about the tile
                # center: shift to origin, perturb, shift back, then
                # compose with the face crop (applied right-to-left).
                to_origin = pv.AffineTranslate(-0.5 * w, -0.5 * h, self.tile_size)
                rot = pv.AffineRotate(random.uniform(-pi / 8, pi / 8), self.tile_size)
                scl = pv.AffineScale(random.uniform(0.9, 1.1), self.tile_size)
                shift = pv.AffineTranslate(random.uniform(-0.05 * w, 0.05 * w),
                                           random.uniform(-0.05 * h, 0.05 * h),
                                           self.tile_size)
                from_origin = pv.AffineTranslate(0.5 * w, 0.5 * h, self.tile_size)
                affine = from_origin * shift * scl * rot * to_origin * affine

            # Crop and normalize to zero mean / unit std.
            cropped = affine.transformImage(im)
            cropped = pv.meanStd(cropped)

            # Mark the eyes in tile coordinates.
            leye = affine.transformPoint(left_eye)
            reye = affine.transformPoint(right_eye)

            # Record the label pair and train the normalizer.
            self.training_labels.append((leye, reye))
            self.normalize.addTraining(0.0, cropped)

            # Just use the first success.
            return

    # The face was not detected.
    self.detection_failures += 1