def findEyes(self):
    eye_cascade = cv2.CascadeClassifier(
        'C:\\OpenCV\\data\\haarcascades\\haarcascade_eye.xml')
    eyes = eye_cascade.detectMultiScale(self.img)
    im_toshow = copy.deepcopy(self.img)
    eyeArr = []
    for (ex, ey, ew, eh) in eyes:
        cv2.rectangle(im_toshow, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)
        eyeArr.append((ex, ey, ew, eh))

    # Filter by area: keep the two largest detections
    eyeAreas = list(map(self.areaRect, eyeArr))
    ix = np.argmax(eyeAreas)
    eye1 = eyeArr[ix]
    eyeAreas.pop(ix)
    eyeArr.pop(ix)
    ix = np.argmax(eyeAreas)
    eye2 = eyeArr[ix]

    # Eye locations are the centers of the detection boxes
    ex1, ey1, ew1, eh1 = eye1
    eye1loc = Point(ex1 + ew1 / 2, ey1 + eh1 / 2)
    ex2, ey2, ew2, eh2 = eye2
    eye2loc = Point(ex2 + ew2 / 2, ey2 + eh2 / 2)
    # cv2.circle(self.img, (int(eye1loc.x), int(eye1loc.y)), 1, (255, 0, 0))
    # cv2.circle(self.img, (int(eye2loc.x), int(eye2loc.y)), 1, (255, 0, 0))
    # showImg(im_toshow)
    return eye1loc, eye2loc
def filterFoundEyes(eyes, img):
    if len(eyes) > 1:
        ## If multiple eyes are returned, keep the two biggest ones
        eyeArr = []
        for (ex, ey, ew, eh) in eyes:
            # cv2.rectangle(im_toshow, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)
            eyeArr.append((ex, ey, ew, eh))

        # Filter by area
        eyeAreas = list(map(PreProcessing.areaRect, eyeArr))
        ix = np.argmax(eyeAreas)
        eye1 = eyeArr[ix]
        eyeAreas.pop(ix)
        eyeArr.pop(ix)
        ix = np.argmax(eyeAreas)
        eye2 = eyeArr[ix]
        ex1, ey1, ew1, eh1 = eye1
        ex2, ey2, ew2, eh2 = eye2
    else:
        ## Just one eye was found; estimate the other one fifth of the
        ## image width away, on whichever side the face has room
        ex, ey, ew, eh = eyes[0]
        w0 = np.shape(img)[1]
        if ex < w0 / 2:
            ex1, ey1, ew1, eh1 = [ex, ey, ew, eh]
            ex2, ey2, ew2, eh2 = [ex + w0 / 5, ey, ew, eh]
        else:
            ex1, ey1, ew1, eh1 = [ex - w0 / 5, ey, ew, eh]
            ex2, ey2, ew2, eh2 = [ex, ey, ew, eh]

    eye1loc = Point(ex1 + ew1 / 2, ey1 + eh1 / 2)
    eye2loc = Point(ex2 + ew2 / 2, ey2 + eh2 / 2)
    return eye1loc, eye2loc
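# --- Hypothetical usage sketch for filterFoundEyes (not part of the original code) ---
# Shows how raw (x, y, w, h) boxes from OpenCV's Haar eye detector would be fed into
# filterFoundEyes. The cascade path (cv2.data.haarcascades, available in the
# opencv-python package), the image filename, and the sketch function name are all
# assumptions; if filterFoundEyes lives on PreProcessing, call it from there instead.
def _detect_and_filter_eyes_sketch(img_path='face.jpg'):
    import cv2
    img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
    cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_eye.xml')
    eyes = cascade.detectMultiScale(img)
    if len(eyes) == 0:
        return None
    return filterFoundEyes(eyes, img)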
def calcNormScale(self, shape):
    # Scale factor that normalizes a shape by its inter-eye distance
    # (eye landmark points for 68-point shapes, eye-region centroids otherwise)
    if self.n == 68:
        d = Point.dist(shape.shapePoints[self.leftEyeIx],
                       shape.shapePoints[self.rightEyeIx])
    else:
        rc = ActiveShape.centroid(ActiveShape(shape.shapePoints[31:35]))
        lc = ActiveShape.centroid(ActiveShape(shape.shapePoints[27:31]))
        d = Point.dist(rc, lc)
    s = float(1) / float(d)
    return s
def calcR(self):
    """ Calculates the distance matrix between all points for a given shape;
    sets global variable """
    sp = self.shapePoints
    ## For every point in shapePoints, calculate the distance to every other point
    R = [[Point.dist(sp[k], sp[l]) for k in range(self.n)]
         for l in range(self.n)]
    return R
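# --- Sketch (not part of the original code): a vectorized equivalent of calcR ---
# Assumes the shape points are available as an (n, 2) array of (x, y) coordinates.
# R[l][k] is the Euclidean distance between points k and l, so the result is a
# symmetric n x n matrix with zeros on the diagonal; useful as a sanity check.
def _pairwise_dist_sketch(pts):
    import numpy as np
    pts = np.asarray(pts, dtype=float)          # (n, 2)
    diff = pts[:, None, :] - pts[None, :, :]    # (n, n, 2) pairwise differences
    return np.sqrt((diff ** 2).sum(axis=-1))    # (n, n) distance matrix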
def crop(img, fh, fw):
    ## fh, fw are in terms of x, y -- not h, w (or the other way around)
    # Crop window is positioned and scaled using the detected eyes
    eye1, eye2 = PreProcessing.eyeDetection(img)
    d = Point.dist(eye1, eye2)
    # Set more firmly for other types of images for a consistent size
    newH = int(fh * d)
    newW = int(fw * d)
    y0, x0 = PreProcessing.newEyeLoc(eye1, eye2, newH, newW, d)
    return img[y0:y0 + newH, x0:x0 + newW]
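# --- Worked sketch of the crop arithmetic (values below are hypothetical) ---
# If the detected eyes are d = 100 px apart and crop(img, fh=3.0, fw=2.5) is called,
# the window is newH = int(3.0 * 100) = 300 rows by newW = int(2.5 * 100) = 250
# columns, anchored at the (y0, x0) corner returned by PreProcessing.newEyeLoc,
# i.e. img[y0:y0 + 300, x0:x0 + 250].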
def calcShift(self, point, method):
    ## Get point p (ix 48)
    # Get the initial gradient at the point (unitV already):
    # vector, magnitude, origin
    # print "original: %f, %f" % (point.x, point.y)
    f, m, pt = self.getGradient(point, self.img)
    # print "returned: %f, %f" % (point.x, point.y)
    h, w = np.shape(self.img)
    cX = pt.x
    cY = pt.y
    # Goal is to find the maximal gradient magnitude along the gradient direction
    maxM = 1
    cnt = 0
    while True:
        ## Move the point according to the maximal magnitude response in the area,
        ## normalized by current magnitude / maximal magnitude
        dx = f[0] * float(m) / float(maxM)
        dy = f[1] * float(m) / float(maxM)
        ## Stop if we have wandered too far from the original point
        if cnt > self.maxPx:
            # print point.x + dx, point.y + dy
            return dx, dy
        cnt += 1
        ## Move the point one unit in the direction of the gradient
        cX = cX + f[0]
        cY = cY + f[1]
        ## Stop at the image border
        if cX > w - 2 or cY > h - 2 or cX < 1 or cY < 1:
            # print point.x + dx, point.y + dy
            return dx, dy
        _, cM, _ = self.getGradient(Point(cX, cY), self.img)
        if cM > maxM:
            maxM = cM
def alignEyes(self, eye1, eye2):
    x = ActiveShape.createShape(self.x)
    f, [[ax1, ax2], [ax3, ax4]] = plt.subplots(2, 2)

    # Distance between the detected eyes
    d1 = Point.dist(eye1, eye2)
    rc = ActiveShape.centroid(ActiveShape(x.shapePoints[31:35]))
    lc = ActiveShape.centroid(ActiveShape(x.shapePoints[27:31]))
    if self.asm.n == 68:
        d2 = Point.dist(x.shapePoints[self.asm.rightEyeIx],
                        x.shapePoints[self.asm.leftEyeIx])
    else:
        d2 = Point.dist(rc, lc)
    s = float(d1 / d2)

    # Scale and rotate the model shape, drawing each intermediate step
    shape = copy.deepcopy(x)
    DrawFace(shape, ax1).drawBold()
    shape = shape.scale(s)
    DrawFace(shape, ax2).drawBold()
    rot, thetaRot = self.asm.calcNormRotateImg(shape)
    shape = shape.rotate(rot)
    DrawFace(shape, ax3).drawBold()
    ax1.invert_yaxis()
    ax2.invert_yaxis()
    ax3.invert_yaxis()

    # Translate so the model's eye lands on the detected eye
    rc = ActiveShape.centroid(ActiveShape(shape.shapePoints[31:35]))
    lc = ActiveShape.centroid(ActiveShape(shape.shapePoints[27:31]))
    if self.asm.n == 68:
        t = [[(eye2.x - shape.shapePoints[self.asm.leftEyeIx].x)],
             [(eye2.y - shape.shapePoints[self.asm.leftEyeIx].y)]]
    else:
        t = [[(eye2.x - rc.x)],
             [(eye2.y - rc.y)]]
    shape = shape.translate(t)

    ### Check that the initial shape is within the image frame
    tempS = 0
    nr, nc = np.shape(self.img)
    for pt in shape.shapePoints:
        if pt.y > nr:
            # print "y big"
            tempS += (pt.y - nr + 10) / nr
        if pt.x > nc:
            # print "x big"
            tempS += (pt.x - nc + 10) / nc
    if tempS != 0:
        # Shrink the shape back into frame and re-translate onto the eye
        shape = shape.scale(1 - tempS)
        if self.asm.n == 68:
            t = [[(eye2.x - shape.shapePoints[self.asm.leftEyeIx].x)],
                 [(eye2.y - shape.shapePoints[self.asm.leftEyeIx].y)]]
        else:
            rc = ActiveShape.centroid(ActiveShape(shape.shapePoints[31:35]))
            lc = ActiveShape.centroid(ActiveShape(shape.shapePoints[27:31]))
            t = [[(eye2.x - rc.x)],
                 [(eye2.y - rc.y)]]
        shape = shape.translate(t)
        # print "row: %d\tcol:%d" % (pt.y, pt.x)
        # print np.shape(self.img)

    DrawFace(shape, ax4).drawBold()
    ax4.scatter(eye1.x, eye1.y, c='r')
    ax4.scatter(eye2.x, eye2.y, c='g')
    ax4.imshow(self.img, cmap='gray')
    f.show()
    plt.savefig(os.path.join(self.out, "deform-init.png"))
    plt.gca().invert_yaxis()
    plt.close()

    srot = np.dot(s, rot)
    transDict = {
        't': t,
        's': s,
        'rot': rot,
        'srot': srot,
        'theta': thetaRot
    }
    return shape, transDict
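# --- Sketch (not part of the original code): applying the transform in transDict ---
# Assumes 'rot' is a 2x2 rotation matrix and 't' a 2x1 column vector, as built above,
# so srot = s * rot and a model point p = (x, y) maps into the image frame as
# srot @ p + t. The helper name is hypothetical.
def _apply_transform_sketch(transDict, x, y):
    import numpy as np
    p = np.array([[x], [y]], dtype=float)
    return np.dot(transDict['srot'], p) + np.array(transDict['t'], dtype=float)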
def shapeDist(self, shape):
    # Point-wise distances between corresponding points of the two shapes
    d = list(map(lambda x, y: Point.dist(x, y),
                 self.shapePoints, shape.shapePoints))
    return d
def centroid(self):
    return Point(np.mean(self.xs), np.mean(self.ys))