def updateEyes(self):
    # eye band spans roughly the middle third of the face box
    self.eyeTopline = self.ypt + ((self.height * 1) / 3)
    self.eyeBotline = self.ypt + ((self.height * 1) / 2)
    # left and right eye rectangles as fractions of the face width
    self.eyeLeft1 = cv.Point(self.xpt + (self.width / 5), self.eyeTopline)
    self.eyeLeft2 = cv.Point(self.xpt + ((self.width * 3) / 8), self.eyeBotline)
    self.eyeRight1 = cv.Point(self.xpt + ((self.width * 5) / 8), self.eyeTopline)
    self.eyeRight2 = cv.Point(self.xpt + ((self.width * 4) / 5), self.eyeBotline)
def detect(self, image):
    size = cv.GetSize(image)
    # create grayscale version
    grayscale = cv.CreateImage(size, 8, 1)
    cv.CvtColor(image, grayscale, cv.BGR2GRAY)
    # create and clear storage
    storage = cv.CreateMemStorage(0)
    cv.ClearMemStorage(storage)
    # equalize histogram
    cv.EqualizeHist(grayscale, grayscale)
    # detect faces
    faces = cv.HaarDetectObjects(grayscale, self.face_cascade, storage, 1.2, 2,
                                 cv.HAAR_DO_CANNY_PRUNING, self.face_size)
    if faces:
        # faces detected
        for i in faces:
            cv.Rectangle(image,
                         cv.Point(int(i.x), int(i.y)),
                         cv.Point(int(i.x + i.width), int(i.y + i.height)),
                         cv.RGB(0, 255, 0), 3, 8, 0)
        detected = True
        is_face = True
    else:
        # no face found: detect body instead
        bodies = cv.HaarDetectObjects(grayscale, self.body_cascade, storage, 1.1, 3,
                                      0, self.body_size)
        if bodies:
            # body detected
            for i in bodies:
                cv.Rectangle(image,
                             cv.Point(int(i.x), int(i.y)),
                             cv.Point(int(i.x + i.width), int(i.y + i.height)),
                             cv.RGB(0, 255, 0), 3, 8, 0)
            detected = True
            is_face = False
        else:
            detected = False
            is_face = False
    # release resources we don't need any more
    cv.ReleaseImage(grayscale)
    cv.ReleaseMemStorage(storage)
    return (detected, is_face)
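# For reference, a minimal sketch of the same face-then-body fallback against the
# modern cv2 API (the method above uses the legacy bindings). The cascade file
# names and the (detected, is_face) return convention are assumptions carried
# over from the original, not part of any library contract.
import cv2

def detect_cv2(image,
               face_xml='haarcascade_frontalface_alt.xml',
               body_xml='haarcascade_fullbody.xml'):
    # grayscale + histogram equalization, as in the legacy version
    gray = cv2.equalizeHist(cv2.cvtColor(image, cv2.COLOR_BGR2GRAY))
    face_cascade = cv2.CascadeClassifier(face_xml)
    body_cascade = cv2.CascadeClassifier(body_xml)
    rects = face_cascade.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=2)
    is_face = len(rects) > 0
    if not is_face:
        # fall back to body detection when no face is found
        rects = body_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=3)
    for (x, y, w, h) in rects:
        cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 3)
    return (len(rects) > 0, is_face)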
def getContours(self, image):
    cv.Smooth(image, image, CVtypes.CV_GAUSSIAN, 17, 17)
    #cv.CvtColor(image,self.modifiedImage,CVtypes.CV_GRAY2RGB)
    cv.Threshold(image, image, 128, 255, CVtypes.CV_THRESH_BINARY)
    #cv.CvtColor(image,self.modifiedImage,CVtypes.CV_GRAY2RGB)
    cv.Canny(image, image, 50, 200)
    #cv.CvtColor(image,self.modifiedImage,CVtypes.CV_GRAY2RGB)
    #cv.CvtColor(self.grayImage,self.modifiedImage,CVtypes.CV_GRAY2RGB)
    #cv.Copy(frame,self.modifiedImage)
    #return
    contour = POINTER(cv.Seq)()
    if 1:
        cv.FindContours(
            image,  # self.grayImage,
            self.cvStorage[0],
            byref(contour),
            sizeof(cv.Contour),
            CVtypes.CV_RETR_CCOMP,
            CVtypes.CV_CHAIN_APPROX_SIMPLE,
            cv.Point(0, 0),
        )
    contourBlocks = []
    if contour:
        cSeq = cast(contour, POINTER(cv.Seq))
        ellipses = []
        while cSeq:
            contours = []
            total = cSeq[0].total
            if total >= 6:
                box = cv.FitEllipse2(cSeq)
                ellipses.append(box)
                #print
                #print
            for i in range(total):
                next = CVtypes.CV_GET_SEQ_ELEM(cv.Seq, cSeq, i)
                nContour = cast(next, POINTER(cv.Contour))
                rect = nContour[0].rect
                contours.append(rect)
                #print rect
            contourBlocks.append(contours)
            cSeq = cast(cSeq[0].h_next, POINTER(cv.Seq))
        #print ellipses[0]
        #print
        if 1:
            if contourBlocks:
                bboxes = []
                for contours in contourBlocks:
                    c0 = contours[0]
                    bbox = [self.size[0] + 1, self.size[1] + 1, -1, -1]
                    for i, c in enumerate(contours):
                        #if i==(len(contours)-1): break
                        x0, y0, x1, y1 = c.x, c.y, c.width, c.height
                        if ((x0 > self.size[0]) or (x1 > self.size[0]) or
                                (y0 > self.size[1]) or (y1 > self.size[1]) or
                                (x0 <= 0) or (x1 <= 0) or (y0 <= 0) or (y1 <= 0)):
                            x0, y0, x1, y1 = c0.x, c0.y, c0.width, c0.height
                            continue
                        if x0 < bbox[0]: bbox[0] = x0
                        if x1 < bbox[0]: bbox[0] = x0
                        if x0 > bbox[2]: bbox[2] = x0
                        if x1 > bbox[2]: bbox[2] = x0
                        if y0 < bbox[1]: bbox[1] = y0
                        if y1 < bbox[1]: bbox[1] = y0
                        if y0 > bbox[3]: bbox[3] = y0
                        if y1 > bbox[3]: bbox[3] = y0
                        cv.Line(
                            self.modifiedImage,
                            cv.Point(x0, y0),
                            cv.Point(x1, y1),
                            #cv.Point(contours[i+1].x,contours[i+1].y),
                            CVtypes.CV_RGB(255, 0, 0),
                            1,
                            1,
                        )
                    bboxes.append(bbox)
                for bbox in bboxes:
                    cv.Rectangle(
                        self.modifiedImage,
                        cv.Point(bbox[0], bbox[1]),
                        cv.Point(bbox[2], bbox[3]),
                        CVtypes.CV_RGB(0, 255, 255),
                        1,
                        8,
                    )
                cx, cy = 0, 0
                num = 0
                for e in ellipses:
                    x = e.center.x
                    y = e.center.y
                    center = cv.Point(int(x), int(y))
                    size = cv.Size(int(e.size.width), int(e.size.height))
                    if size.width > self.size[0] or size.height > self.size[1]:
                        continue
                    cx += x
                    cy += y
                    num += 1
                    angle = -e.angle
                    cv.Ellipse(
                        self.modifiedImage,
                        center,
                        size,
                        angle,
                        0,
                        360,
                        CVtypes.CV_RGB(0, 255, 255),
                    )
                if num:
                    cx /= num
                    cy /= num
                    cv.Circle(
                        self.modifiedImage,
                        cv.Point(int(cx), int(cy)),
                        20,
                        CVtypes.CV_RGB(255, 255, 0),
                    )
        cv.DrawContours(
            self.modifiedImage,
            contour,
            CVtypes.CV_RGB(255, 0, 0),
            CVtypes.CV_RGB(0, 255, 0),
            1,
            1,
        )
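# A compact sketch of the same pipeline (smooth, threshold, Canny, contours,
# bounding boxes, ellipse fits, contour drawing) using the modern cv2/numpy API.
# The smoothing kernel and threshold values are carried over from the method
# above; the function name, arguments and colours are illustrative assumptions.
import cv2

def get_contours_cv2(gray, canvas):
    blurred = cv2.GaussianBlur(gray, (17, 17), 0)
    _, binary = cv2.threshold(blurred, 128, 255, cv2.THRESH_BINARY)
    edges = cv2.Canny(binary, 50, 200)
    # OpenCV >= 4 returns (contours, hierarchy)
    contours, _ = cv2.findContours(edges, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
    for c in contours:
        x, y, w, h = cv2.boundingRect(c)
        cv2.rectangle(canvas, (x, y), (x + w, y + h), (255, 255, 0), 1)
        if len(c) >= 6:
            # same minimum-point check as the legacy FitEllipse2 branch
            ellipse = cv2.fitEllipse(c)
            cv2.ellipse(canvas, ellipse, (255, 255, 0), 1)
    cv2.drawContours(canvas, contours, -1, (0, 0, 255), 1)
    return contours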
def detect_and_draw(img, cascade):
    global age
    global trackedFaces
    global plotpoints

    t = cv.GetTickCount()  # start counter

    cv.CvtColor(img, gray, cv.BGR2GRAY)
    cv.Resize(gray, small_img, cv.INTER_LINEAR)
    cv.ClearMemStorage(storage)

    # Ages all trackedFaces
    for f in trackedFaces:
        f.updateLife()
    # Remove expired faces
    for f in trackedFaces:
        if f.isTooOld():
            trackedFaces.remove(f)

    faces = cv.HaarDetectObjects(small_img, cascade, storage, haar_scale,
                                 min_neighbors, haar_flags, min_size)
    drawline = 0

    if faces:
        # found a face
        for r in faces:
            matchedFace = False
            pt1 = cv.Point(int(r.x * image_scale), int(r.y * image_scale))
            pt2 = cv.Point(int((r.x + r.width) * image_scale),
                           int((r.y + r.height) * image_scale))

            # check if there are trackedFaces
            if len(trackedFaces) > 0:
                # each face being tracked
                for f in trackedFaces:
                    # the face is found (small movement)
                    if ((abs(f.xpt - pt1.x) < FACE_MAX_MOVEMENT) and
                            (abs(f.ypt - pt1.y) < FACE_MAX_MOVEMENT)):
                        matchedFace = True
                        f.updateFace(int(r.width * image_scale),
                                     int(r.height * image_scale), pt1.x, pt1.y)
                        #f.updateFace(r.width*image_scale, r.height*image_scale, pt1.x, pt1.y)
                        mf = f
                        break
                # if face not found, add a new face
                if matchedFace == False:
                    f = Face(0, int(r.width * image_scale),
                             int(r.height * image_scale), pt1.x, pt1.y, 0)
                    trackedFaces.append(f)
                    mf = f
            else:
                # No tracked faces: adding one
                f = Face(0, int(r.width * image_scale),
                         int(r.height * image_scale), pt1.x, pt1.y, 0)
                trackedFaces.append(f)
                mf = f

            # where to draw face and properties
            if mf.age > 5:
                # draw attention line
                lnpt1 = cv.Point(int(mf.xpt * scale), int(mf.ypt * scale - 5) - 5)
                if mf.age > mf.width:
                    lnpt2 = cv.Point(int(mf.xpt * scale + mf.width), int(mf.ypt * scale - 5))
                else:
                    lnpt2 = cv.Point(int(mf.xpt * scale + mf.age), int(mf.ypt * scale - 5))

                #cv.Line(img, lnpt1, lnpt2, RED, 2, 8, 0)  # drawing attention line
                cv.Rectangle(img, lnpt1, lnpt2, RED, 4, 8, 0)  # drawing bolded attention line

                # draw eyes
                cv.Rectangle(img, mf.eyeLeft1, mf.eyeLeft2, MAGENTA, 3, 8, 0)
                cv.Rectangle(img, mf.eyeRight1, mf.eyeRight2, MAGENTA, 3, 8, 0)
                # draw mouth
                cv.Rectangle(img, mf.mouthTopLeft, mf.mouthBotRight, ORANGE, 3, 8, 0)
                # draw face
                cv.Rectangle(img, pt1, pt2, getColor(mf), 3, 8, 0)
                drawline = mf.age

    if CAPTURING:
        saveAsJPG(img)
    if osName == "nt":
        cv.Flip(img, img, 0)
    cv.ShowImage('Camera', img)

    t = cv.GetTickCount() - t  # counter for FPS
    print "%i fps." % (cv.GetTickFrequency() * 1000000. / t)  # print FPS
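# The tracking step above associates a detection with an existing Face when it
# has moved less than FACE_MAX_MOVEMENT pixels in both x and y, and otherwise
# starts tracking a new face. A standalone sketch of that rule (the threshold
# value and helper names here are illustrative assumptions):
FACE_MAX_MOVEMENT = 40  # assumed threshold, in pixels

def match_or_add(tracked, x, y, make_face):
    for f in tracked:
        if abs(f.xpt - x) < FACE_MAX_MOVEMENT and abs(f.ypt - y) < FACE_MAX_MOVEMENT:
            return f  # small movement: treat as the same face
    f = make_face(x, y)  # no match: begin tracking a new face
    tracked.append(f)
    return f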
def updateMouth(self):
    # mouth band spans the lower third of the face box
    self.mouthTopline = self.ypt + ((self.height * 2) / 3)
    self.mouthBotline = self.ypt + self.height
    self.mouthTopLeft = cv.Point(self.xpt + self.width / 5, self.mouthTopline)
    self.mouthBotRight = cv.Point(self.xpt + (self.width * 4) / 5, self.mouthBotline)
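# Taken together, updateEyes/updateMouth place the eyes in the band between 1/3
# and 1/2 of the face height and the mouth between 2/3 and the bottom, with
# fixed width fractions. A plain-tuple sketch of the same layout (the helper
# name and return format are illustrative, not from the original):
def face_regions(x, y, w, h):
    eyes_top, eyes_bot = y + h // 3, y + h // 2
    mouth_top, mouth_bot = y + (2 * h) // 3, y + h
    left_eye = ((x + w // 5, eyes_top), (x + (3 * w) // 8, eyes_bot))
    right_eye = ((x + (5 * w) // 8, eyes_top), (x + (4 * w) // 5, eyes_bot))
    mouth = ((x + w // 5, mouth_top), (x + (4 * w) // 5, mouth_bot))
    return left_eye, right_eye, mouth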
def detect(image):
    image_size = cv.GetSize(image)
    # create grayscale version
    grayscale = cv.CreateImage(image_size, 8, 1)
    cv.CvtColor(image, grayscale, cv.BGR2GRAY)
    # create storage
    storage = cv.CreateMemStorage(0)
    cv.ClearMemStorage(storage)
    # equalize histogram
    cv.EqualizeHist(grayscale, grayscale)
    # detect objects
    cascade = cv.LoadHaarClassifierCascade('haarcascade_frontalface_alt.xml', cv.Size(1, 1))
    faces = cv.HaarDetectObjects(grayscale, cascade, storage, 1.2, 2,
                                 cv.HAAR_DO_CANNY_PRUNING, cv.Size(50, 50))
    if faces:
        print 'face detected!'
        for i in faces:
            cv.Rectangle(image,
                         cv.Point(int(i.x), int(i.y)),
                         cv.Point(int(i.x + i.width), int(i.y + i.height)),
                         cv.RGB(0, 255, 0), 3, 8, 0)


# create windows
cv.NamedWindow('Camera', cv.WINDOW_AUTOSIZE)

# create capture device
device = 0  # assume we want first device
capture = cv.CreateCameraCapture(device)
cv.SetCaptureProperty(capture, cv.CAP_PROP_FRAME_WIDTH, 640)
cv.SetCaptureProperty(capture, cv.CAP_PROP_FRAME_HEIGHT, 480)

# check if capture device is OK
if not capture:
    print "Error opening capture device"
    sys.exit(1)

while 1:  # do forever
    # capture the current frame
    frame = cv.QueryFrame(capture)
    if frame is None:
        break
    # mirror
    cv.Flip(frame, None, 1)
    # face detection
    detect(frame)
    # display webcam image
    cv.ShowImage('Camera', frame)
    # handle events
    k = cv.WaitKey(10)
    if k == 0x1b:  # ESC
        print 'ESC pressed. Exiting ...'
        break
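# The same capture-and-detect loop written against the modern cv2 API, for
# comparison. This is a minimal sketch: the cascade path, window name,
# resolution and ESC handling mirror the legacy code above, everything else
# is assumed.
import sys
import cv2

face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_alt.xml')
cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
if not cap.isOpened():
    sys.exit("Error opening capture device")

while True:
    ok, frame = cap.read()
    if not ok:
        break
    frame = cv2.flip(frame, 1)  # mirror
    gray = cv2.equalizeHist(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY))
    for (x, y, w, h) in face_cascade.detectMultiScale(gray, 1.2, 2,
                                                      minSize=(50, 50)):
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 3)
    cv2.imshow('Camera', frame)
    if cv2.waitKey(10) & 0xFF == 27:  # ESC pressed
        break

cap.release()
cv2.destroyAllWindows()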
cv.SetZero(histimg)
# compute the width for each bin to display
bin_w = histimg[0].width / hdims
for i in range(hdims):  # for all the bins
    # get the value, and scale to the size of the hist image
    val = int(round(cv.GetReal1D(hist[0].bins, i) * histimg[0].height / 255))
    # compute the color
    color = hsv2rgb(i * 180. / hdims)
    # draw the rectangle in the wanted color
    cv.Rectangle(histimg,
                 cv.Point(i * bin_w, histimg[0].height),
                 cv.Point((i + 1) * bin_w, histimg[0].height - val),
                 color, -1, 8, 0)

# we can now display the images
cv.ShowImage('Camera', frame)
cv.ShowImage('Histogram', histimg)

# handle events
k = cv.WaitKey(10)
if k == 0x1b:
    # user has pressed the ESC key, so exit
    break
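# An equivalent hue-histogram display with cv2/numpy, shown as a minimal sketch.
# The 180-degree hue range and per-bin colouring mirror the loop above; the
# function name, bin count and image size are illustrative assumptions.
import cv2
import numpy as np

def draw_hue_histogram(frame, hdims=16, height=200, width=320):
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    hist = cv2.calcHist([hsv], [0], None, [hdims], [0, 180])
    cv2.normalize(hist, hist, 0, height, cv2.NORM_MINMAX)
    histimg = np.zeros((height, width, 3), np.uint8)
    bin_w = width // hdims
    for i in range(hdims):
        val = int(hist[i][0])
        # colour each bar with the hue it represents
        hue = np.uint8([[[int(i * 180 / hdims), 255, 255]]])
        color = tuple(int(c) for c in cv2.cvtColor(hue, cv2.COLOR_HSV2BGR)[0, 0])
        cv2.rectangle(histimg, (i * bin_w, height),
                      ((i + 1) * bin_w, height - val), color, -1)
    return histimg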