def testDetectorDOG6(self):
    detector = DetectorDOG(selector='all')
    filename = os.path.join(pyvision.__path__[0],'data','nonface','NONFACE_37.jpg')
    im = Image(filename,bw_annotate=True)

    points = detector.detect(im)

    for score,pt,radius in points:
        im.annotatePoint(pt)

    self.assertEquals(len(points),561)
def testDetectorCorner5(self):
    detector = DetectorCorner(selector='best')
    filename = os.path.join(pyvision.__path__[0],'data','nonface','NONFACE_37.jpg')
    im = Image(filename,bw_annotate=True)

    points = detector.detect(im)

    for score,pt,radius in points:
        im.annotatePoint(pt)

    if self.SHOW_IMAGES: im.show()
    self.assertEquals(len(points),250)
def testDetectorDOG1(self):
    detector = DetectorDOG(selector='best',n=100)
    filename = os.path.join(pyvision.__path__[0],'data','nonface','NONFACE_1.jpg')
    im = Image(filename,bw_annotate=True)

    points = detector.detect(im)
    #print len(points)

    for score,pt,radius in points:
        im.annotateCircle(pt,radius)
    #im.show()

    self.assertEquals(len(points),100)
def setUp(self):
    self.images = []
    self.names = []

    self.eyes = EyesFile(os.path.join(SCRAPS_FACE_DATA, "coords.txt"))
    for filename in self.eyes.files():
        img = Image(os.path.join(SCRAPS_FACE_DATA, filename + ".pgm"))
        self.images.append(img)
        self.names.append(filename)

    self.assert_(len(self.images) == 173)
def test_pca_scraps(self):
    face_test = FaceRecognitionTest.FaceRecognitionTest(
        name='PCA_CSUScraps', score_type=FaceRecognitionTest.SCORE_TYPE_HIGH)

    # Train the PCA face space on every image in the scraps set.
    pca = PCA(drop_front=2, basis_vectors=55)
    for im_name in self.eyes.files():
        im = Image(os.path.join(SCRAPS_FACE_DATA, im_name + ".pgm"))
        rect = self.eyes.getFaces(im_name)
        eyes = self.eyes.getEyes(im_name)
        pca.addTraining(im, rect=rect[0], eyes=eyes[0])
    pca.train()

    # Compute a face record for each image.
    face_records = {}
    for im_name in self.eyes.files():
        im = Image(os.path.join(SCRAPS_FACE_DATA, im_name + ".pgm"))
        rect = self.eyes.getFaces(im_name)
        eyes = self.eyes.getEyes(im_name)
        fr = pca.computeFaceRecord(im, rect=rect[0], eyes=eyes[0])
        face_records[im_name] = fr

    # Score every image against every other image.
    for i_name in face_records.keys():
        scores = []
        for j_name in face_records.keys():
            similarity = pca.similarity(face_records[i_name], face_records[j_name])
            scores.append((j_name, similarity))
        face_test.addSample(i_name, scores)

    #print face_test.rank1_bounds
    self.assertAlmostEqual(face_test.rank1_rate, 0.43930635838150289)
    self.assertAlmostEqual(face_test.rank1_bounds[0], 0.3640772723094895)
    self.assertAlmostEqual(face_test.rank1_bounds[1], 0.51665118592791259)

    roc = face_test.getROCAnalysis()

    # Test based on far=0.01
    roc_point = roc.getFAR(far=0.01)
def getBasis(self):
    basis = self.pca.getBasis()
    images = []
    print basis.shape
    r, c = basis.shape
    for i in range(r):
        # Reshape each basis vector back into a face-sized image.
        im = basis[i, :]
        im = im.reshape(self.face_size)
        im = Image(im)
        images.append(im)
    print len(images)
    return images
def test_prev_ref1(self):
    fname = os.path.join(pyvision.__path__[0],'data','nonface','NONFACE_13.jpg')
    im = Image(fname)

    ref = weakref.ref(im)
    self.assertEquals(ref(), im)

    tmp = im
    del im
    self.assertEquals(ref(), tmp)

    del tmp
    self.assertEquals(ref(), None)
def test_prev_ref2(self):
    fname = os.path.join(pyvision.__path__[0],'data','nonface','NONFACE_13.jpg')
    im = Image(fname)
    #im.show()
    w,h = im.size

    # Try scaling down and then scaling back up
    tmp1 = AffineScale(0.1,(w/10,h/10)).transformImage(im)
    #tmp1.show()

    tmp2 = AffineScale(10.0,(w,h)).transformImage(tmp1,use_orig=False)
    tmp2.annotateLabel(pv.Point(10,10), "This image should be blurry.")
    #tmp2.show()

    tmp3 = AffineScale(10.0,(w,h)).transformImage(tmp1,use_orig=True)
    tmp3.annotateLabel(pv.Point(10,10), "This image should be sharp.")
    #tmp3.show()

    del im

    tmp4 = AffineScale(10.0,(w,h)).transformImage(tmp1,use_orig=True)
    tmp4.annotateLabel(pv.Point(10,10), "This image should be blurry.")
def test_prev_ref3(self):
    fname = os.path.join(pv.__path__[0],'data','nonface','NONFACE_13.jpg')
    torig = tprev = taccu = im = Image(fname)
    #im.show()
    w,h = im.size

    # Scale
    aff = AffineScale(0.5,(w/2,h/2))
    accu = aff
    torig = aff.transformImage(torig)
    tprev = aff.transformImage(tprev,use_orig=False)
    taccu = accu.transformImage(im)

    torig.annotateLabel(pv.Point(10,10), "use_orig = True")
    tprev.annotateLabel(pv.Point(10,10), "use_orig = False")
    taccu.annotateLabel(pv.Point(10,10), "accumulated")

    #torig.show()
    #tprev.show()
    #taccu.show()

    # Translate
    aff = AffineTranslate(20,20,(w/2,h/2))
    accu = aff*accu
    torig = aff.transformImage(torig)
    tprev = aff.transformImage(tprev,use_orig=False)
    taccu = accu.transformImage(im)

    torig.annotateLabel(pv.Point(10,10), "use_orig = True")
    tprev.annotateLabel(pv.Point(10,10), "use_orig = False")
    taccu.annotateLabel(pv.Point(10,10), "accumulated")

    #torig.show()
    #tprev.show()
    #taccu.show()

    # Rotate
    aff = AffineRotate(np.pi/4,(w/2,h/2))
    accu = aff*accu
    torig = aff.transformImage(torig)
    tprev = aff.transformImage(tprev,use_orig=False)
    taccu = accu.transformImage(im)

    torig.annotateLabel(pv.Point(10,10), "use_orig = True")
    tprev.annotateLabel(pv.Point(10,10), "use_orig = False")
    taccu.annotateLabel(pv.Point(10,10), "accumulated")

    #torig.show()
    #tprev.show()
    #taccu.show()

    # Translate
    aff = AffineTranslate(100,-10,(w/2,h/2))
    accu = aff*accu
    torig = aff.transformImage(torig)
    tprev = aff.transformImage(tprev,use_orig=False)
    taccu = accu.transformImage(im)

    torig.annotateLabel(pv.Point(10,10), "use_orig = True")
    tprev.annotateLabel(pv.Point(10,10), "use_orig = False")
    taccu.annotateLabel(pv.Point(10,10), "accumulated")

    #torig.show()
    #tprev.show()
    #taccu.show()

    # Scale
    aff = AffineScale(2.0,(w,h))
    accu = aff*accu
    torig = aff.transformImage(torig)
    tprev = aff.transformImage(tprev,use_orig=False)
    taccu = accu.transformImage(im)

    torig.annotateLabel(pv.Point(10,10), "use_orig = True")
    tprev.annotateLabel(pv.Point(10,10), "use_orig = False")
    taccu.annotateLabel(pv.Point(10,10), "accumulated")
def setUp(self):
    fname = os.path.join(pyvision.__path__[0],'data','nonface','NONFACE_13.jpg')
    self.test_image = Image(fname)
def transformImage(self,im, use_orig=True, inverse=False):
    '''
    Transforms an image into the new coordinate system.

    If this image was produced via an affine transform of another image,
    this method will attempt to trace weak references to the original image
    and directly compute the new image from that image to improve accuracy.
    To accomplish this a weak reference to the original source image and
    the affine matrix used for the transform are added to any image
    produced by this method.  This can be disabled using the use_orig
    parameter.

    @param im: an Image object
    @param use_orig: (True or False) attempts to find and use the original
        image as the source to avoid an accumulation of errors.
    @param inverse: (True or False) if True, apply the inverse of this
        transform instead.
    @returns: the transformed image
    '''
    #TODO: does not support opencv images.  see Perspective.py
    prev_im = im

    if inverse:
        inverse = self.matrix
    else:
        inverse = self.inverse

    if use_orig:
        # Find the oldest image used to produce this one by following weak
        # references.

        # Check to see if there is an aff_prev list
        if hasattr(prev_im,'aff_prev'):

            # If there is... search that list for the oldest image
            found_prev = False
            for i in range(len(prev_im.aff_prev)):
                ref,cmat = prev_im.aff_prev[i]
                if not found_prev and ref():
                    im = ref()
                    mat = np.eye(3)
                    found_prev = True
                if found_prev:
                    mat = np.dot(mat,cmat)

            if found_prev:
                inverse = np.dot(mat,inverse)

    if im.getType() == TYPE_PIL:
        data = inverse[:2,:].flatten()
        #data = (matrix[0,0],matrix[0,1],matrix[0,2],matrix[1,0],matrix[1,1],matrix[1,2])
        pil = im.asPIL().transform(self.size, AFFINE, data, self.filter)
        result = Image(pil)
    elif im.getType() == TYPE_MATRIX_2D:
        mat = im.asMatrix2D()
        mat = affine_transform(mat, self.inverse[:2,:2], offset=self.inverse[:2,2])
        result = Image(mat)
    elif im.getType() == TYPE_OPENCV:
        matrix = pv.NumpyToOpenCV(self.matrix)
        src = im.asOpenCV()
        dst = cv.CreateImage( (self.size[0],self.size[1]), cv.IPL_DEPTH_8U, src.nChannels )
        cv.WarpPerspective( src, dst, matrix, cv.CV_INTER_LINEAR+cv.CV_WARP_FILL_OUTLIERS, cv.ScalarAll(128))
        result = pv.Image(dst)
    else:
        raise NotImplementedError("Unhandled image type for affine transform.")

    # Check to see if there is an aff_prev list for this object
    if use_orig and hasattr(prev_im,'aff_prev'):
        result.aff_prev = copy.copy(prev_im.aff_prev)
    else:
        # Create one if not
        result.aff_prev = []

    # Append the prev image and new transform
    result.aff_prev.append( (weakref.ref(prev_im), self.inverse) )

    return result
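# A minimal usage sketch of the aff_prev mechanism described above.  This
# helper is hypothetical (not part of the pyvision source); it assumes only
# the pv.Image and pv.AffineScale names exercised by test_prev_ref2.  Each
# image returned by transformImage carries a list of (weak reference, matrix)
# pairs, so a chain of transforms can be re-rendered from the original pixels
# in a single resampling step.
def _example_use_orig(fname):
    import pyvision as pv
    im = pv.Image(fname)
    w,h = im.size

    # Downscale to 1/10 size, discarding most of the pixel data.
    small = pv.AffineScale(0.1,(w/10,h/10)).transformImage(im)

    # use_orig=True follows small.aff_prev back to im and resamples the
    # full-resolution pixels, so the result should be sharp.
    sharp = pv.AffineScale(10.0,(w,h)).transformImage(small,use_orig=True)

    # use_orig=False resamples the 1/10-scale pixels, so the result is blurry.
    blurry = pv.AffineScale(10.0,(w,h)).transformImage(small,use_orig=False)

    return sharp, blurry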
def transformImage(self,im_a, use_orig=True, inverse=False):
    '''
    Transforms an image into the new coordinate system.

    If this image was produced via an affine transform of another image,
    this method will attempt to trace weak references to the original image
    and directly compute the new image from that image to improve accuracy.
    To accomplish this a weak reference to the original source image and
    the affine matrix used for the transform are added to any image
    produced by this method.  This can be disabled using the use_orig
    parameter.

    @param im_a: an Image object
    @param use_orig: (True or False) attempts to find and use the original
        image as the source to avoid an accumulation of errors.
    @param inverse: (True or False) if True, apply the inverse of this
        transform instead.
    @returns: the transformed image
    '''
    #TODO: does not support opencv images.  see Perspective.py
    prev_im = im_a

    if inverse:
        inverse = self.matrix
    else:
        inverse = self.inverse

    if use_orig:
        # Find the oldest image used to produce this one by following weak
        # references.

        # Check to see if there is an aff_prev list
        if hasattr(prev_im,'aff_prev'):

            # If there is... search that list for the oldest image
            found_prev = False
            for i in range(len(prev_im.aff_prev)):
                ref,cmat = prev_im.aff_prev[i]
                if not found_prev and ref():
                    im_a = ref()
                    mat = np.eye(3)
                    found_prev = True
                if found_prev:
                    mat = np.dot(mat,cmat)

            if found_prev:
                inverse = np.dot(mat,inverse)

    if im_a.getType() == TYPE_PIL:
        data = inverse[:2,:].flatten()
        #data = (matrix[0,0],matrix[0,1],matrix[0,2],matrix[1,0],matrix[1,1],matrix[1,2])
        pil = im_a.asPIL().transform(self.size, AFFINE, data, self.interpolate)
        result = Image(pil)

    elif im_a.getType() == TYPE_MATRIX_2D:
        # Transform a matrix 2d
        mat = im_a.asMatrix2D()
        mat = affine_transform(mat, self.inverse[:2,:2], offset=self.inverse[:2,2])
        result = Image(mat[:self.size[0],:self.size[1]])

    elif im_a.getType() == TYPE_MATRIX_RGB:
        # Transform a matrix 3d by warping each channel separately
        mat = im_a.asMatrix3D()
        c0 = mat[0,:,:]
        c1 = mat[1,:,:]
        c2 = mat[2,:,:]
        c0 = affine_transform(c0, self.inverse[:2,:2], offset=self.inverse[:2,2])
        c1 = affine_transform(c1, self.inverse[:2,:2], offset=self.inverse[:2,2])
        c2 = affine_transform(c2, self.inverse[:2,:2], offset=self.inverse[:2,2])
        mat = np.array([c0,c1,c2],dtype=np.float32)
        result = Image(mat[:,:self.size[0],:self.size[1]])

    elif im_a.getType() == TYPE_OPENCV2:
        # Transform an opencv 2 image
        src = im_a.asOpenCV2()
        dst = cv2.warpPerspective(src, self.matrix, self.size)
        result = pv.Image(dst)

    elif im_a.getType() == TYPE_OPENCV2BW:
        # Transform a bw opencv 2 image
        src = im_a.asOpenCV2BW()
        dst = cv2.warpPerspective(src, self.matrix, self.size)
        result = pv.Image(dst)

    else:
        raise NotImplementedError("Unhandled image type for affine transform.")

    # Check to see if there is an aff_prev list for this object
    if use_orig and hasattr(prev_im,'aff_prev'):
        result.aff_prev = copy.copy(prev_im.aff_prev)
    else:
        # Create one if not
        result.aff_prev = []

    # Append the prev image and new transform
    result.aff_prev.append( (weakref.ref(prev_im), self.inverse) )

    return result
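# A second hedged sketch (hypothetical helper, not part of the pyvision
# source): transforms can also be composed explicitly with *, as exercised by
# test_prev_ref3 above.  Applying the accumulated product once is the manual
# equivalent of letting use_orig=True trace the aff_prev chain, and avoids
# resampling the image at every intermediate step.
def _example_compose(fname):
    import numpy as np
    import pyvision as pv
    im = pv.Image(fname)
    w,h = im.size

    scale = pv.AffineScale(0.5,(w/2,h/2))
    shift = pv.AffineTranslate(20,20,(w/2,h/2))
    rot = pv.AffineRotate(np.pi/4,(w/2,h/2))

    # Accumulate the chain into a single affine transform...
    accu = rot*shift*scale

    # ...and resample the original pixels exactly once.
    return accu.transformImage(im)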