Example #1
    def detect_faces(self, image_filename):
        """ Detects all faces and returns a list with
                images and corresponding coordinates"""

        logging.debug(
            'Start method "detect_faces" for file %s (face-detector.py)' %
            image_filename)
        cascade = cv.Load(parameter.cascadefile)  # load face cascade
        image = cv.LoadImage(image_filename)  # loads and converts image

        # detect and save coordinates of detected faces
        coordinates = cv.HaarDetectObjects(
            image, cascade, cv.CreateMemStorage(), parameter.scaleFactor,
            parameter.minNeighbors, parameter.flags, parameter.min_facesize)

        # Convert to greyscale - better results when converting AFTER facedetection with viola jones
        if image.channels == 3:
            logging.debug(
                'Converting image %s to a greyscale image (face-detector.py)'
                % image_filename)
            # create a single-channel image and convert to greyscale (necessary for SURF)
            grey_face = cv.CreateImage((image.width, image.height), 8, 1)
            cv.CvtColor(image, grey_face, cv.CV_RGB2GRAY)
            image = grey_face

        logging.debug(
            '%d faces successfully detected in file %s (face-detector.py)' %
            (len(coordinates), image_filename))
        return image, coordinates
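Note: the snippet above uses the legacy `cv` API that later OpenCV releases removed. As a rough, illustrative sketch (not part of the original project), the same Viola-Jones detection with the modern `cv2` interface might look as follows; the cascade path and parameter values are assumptions:

import cv2

def detect_faces_cv2(image_filename, cascade_path="haarcascade_frontalface_alt.xml"):
    """Sketch: Viola-Jones face detection with the modern cv2 API."""
    cascade = cv2.CascadeClassifier(cascade_path)  # hypothetical cascade path
    image = cv2.imread(image_filename)
    grey = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)  # the detector works on greyscale
    coordinates = cascade.detectMultiScale(grey, scaleFactor=1.1,
                                           minNeighbors=3, minSize=(30, 30))
    return grey, coordinates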
Example #2
 def detect(self, obj, event):
     # First, reset image, in case of previous detections:
     active_handle = self.get_active('Media')
     media = self.dbstate.db.get_media_from_handle(active_handle)
     self.load_image(media)
     min_face_size = (50, 50)  # FIXME: get from setting
     self.cv_image = cv.LoadImage(self.full_path,
                                  cv.CV_LOAD_IMAGE_GRAYSCALE)
     cv.EqualizeHist(self.cv_image, self.cv_image)
     cascade = cv.Load(HAARCASCADE_PATH)
     faces = cv.HaarDetectObjects(self.cv_image, cascade,
                                  cv.CreateMemStorage(0), 1.2, 2,
                                  cv.CV_HAAR_DO_CANNY_PRUNING,
                                  min_face_size)
     references = self.find_references()
     rects = []
     o_width, o_height = [
         float(t) for t in (self.cv_image.width, self.cv_image.height)
     ]
     for ((x, y, width, height), neighbors) in faces:
         # percentages:
         rects.append((x / o_width, y / o_height, width / o_width,
                       height / o_height))
     self.draw_rectangles(rects, references)
Example #3
    def detect_no_draw(self, img):
        # allocate temporary images
        gray = cv.CreateImage((img.width, img.height), 8, 1)
        small_img = cv.CreateImage((cv.Round(img.width / self.image_scale),
                                    cv.Round(img.height / self.image_scale)),
                                   8, 1)

        # convert color input image to grayscale
        cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

        # scale input image for faster processing
        cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)
        cv.EqualizeHist(small_img, small_img)

        faces = []
        if self.cascade:
            t = cv.GetTickCount()
            faces = cv.HaarDetectObjects(small_img, self.cascade,
                                         cv.CreateMemStorage(0),
                                         self.haar_scale, self.min_neighbors,
                                         self.haar_flags, self.min_size)
            t = cv.GetTickCount() - t
        return bool(faces)
Example #4
def DetectFace(image, faceCascade, returnImage=False):
    min_size = (20, 20)
    haar_scale = 1.1
    min_neighbors = 3
    haar_flags = 0

    # Equalize the histogram
    cv.EqualizeHist(image, image)

    # Detect the faces
    faces = cv.HaarDetectObjects(image, faceCascade, cv.CreateMemStorage(0),
                                 haar_scale, min_neighbors, haar_flags,
                                 min_size)

    # If faces are found
    if faces and returnImage:
        for ((x, y, w, h), n) in faces:
            # Convert bounding box to two CvPoints
            pt1 = (int(x), int(y))
            pt2 = (int(x + w), int(y + h))
            cv.Rectangle(image, pt1, pt2, cv.RGB(255, 0, 0), 5, 8, 0)

    if returnImage:
        return image
    else:
        return faces
Example #5
def DetectFace(image, faceCascade, returnImage=False):
    # This function takes a grey scale cv image and finds
    # the patterns defined in the haarcascade function
    # modified from: http://www.lucaamore.com/?p=638

    #variables
    min_size = (20, 20)
    haar_scale = 1.1
    min_neighbors = 3
    haar_flags = 0

    # Equalize the histogram
    cv.EqualizeHist(image, image)

    # Detect the faces
    faces = cv.HaarDetectObjects(image, faceCascade, cv.CreateMemStorage(0),
                                 haar_scale, min_neighbors, haar_flags,
                                 min_size)

    # If faces are found
    if faces and returnImage:
        for ((x, y, w, h), n) in faces:
            # Convert bounding box to two CvPoints
            pt1 = (int(x), int(y))
            pt2 = (int(x + w), int(y + h))
            cv.Rectangle(image, pt1, pt2, cv.RGB(255, 0, 0), 5, 8, 0)

    if returnImage:
        return image
    else:
        return faces
Example #6
def DetectFace(image, faceCascade):
    #modified from: http://www.lucaamore.com/?p=638

    min_size = (20, 20)
    image_scale = 1
    haar_scale = 1.1
    min_neighbors = 3
    haar_flags = 0

    # Allocate the temporary images
    smallImage = cv.CreateImage((cv.Round(image.width / image_scale),
                                 cv.Round(image.height / image_scale)), 8, 1)

    # Scale input image for faster processing
    cv.Resize(image, smallImage, cv.CV_INTER_LINEAR)

    # Equalize the histogram
    cv.EqualizeHist(smallImage, smallImage)

    # Detect the faces
    faces = cv.HaarDetectObjects(smallImage, faceCascade,
                                 cv.CreateMemStorage(0), haar_scale,
                                 min_neighbors, haar_flags, min_size)

    # If faces are found
    if faces:
        for ((x, y, w, h), n) in faces:
            # the input to cv.HaarDetectObjects was resized, so scale the
            # bounding box of each face and convert it to two CvPoints
            pt1 = (int(x * image_scale), int(y * image_scale))
            pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
            cv.Rectangle(image, pt1, pt2, cv.RGB(255, 0, 0), 5, 8, 0)

    return image
Example #7
def getContours(im, approx_value=1):  # Return approximated contours
    storage = cv.CreateMemStorage(0)
    contours = cv.FindContours(cv.CloneImage(im), storage, cv.CV_RETR_CCOMP,
                               cv.CV_CHAIN_APPROX_SIMPLE)
    contourLow = cv.ApproxPoly(contours, storage, cv.CV_POLY_APPROX_DP,
                               approx_value, approx_value)
    return contourLow
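For comparison, a minimal sketch of the same contour extraction and polygon approximation with the modern cv2 functions; the function name and default epsilon are illustrative:

import cv2

def get_contours_cv2(binary_img, approx_value=1.0):
    # findContours may modify its input, so work on a copy
    contours = cv2.findContours(binary_img.copy(), cv2.RETR_CCOMP,
                                cv2.CHAIN_APPROX_SIMPLE)[-2]
    # Douglas-Peucker approximation of each contour
    return [cv2.approxPolyDP(c, approx_value, True) for c in contours]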
Example #8
def DetectEyes(imageCV, faceCascade, eyeCascade):
    minSize = (20, 20)
    imageScale = 2
    haarScale = 1.2
    minNeighbors = 2
    haarFlags = 0

    # Allocate the temporary images
    #gray = cv2.CreateImage((imageCV.width, image.height), 8, 1)
    #smallImage = cv.CreateImage((cv.Round(image.width / image_scale), cv2.Round (image.height / image_scale)), 8 ,1)

    # Convert color input image to grayscale
    cv2.cvtColor(image, gray, cv.CV_BGR2GRAY)

    # Scale input image for faster processing
    cv2.Resize(gray, smallImage, cv.CV_INTER_LINEAR)

    # Equalize the histogram
    cv2.EqualizeHist(smallImage, smallImage)

    # Detect the faces
    faces = cv2.HaarDetectObjects(smallImage, faceCascade,
                                  cv2.CreateMemStorage(0), haar_scale,
                                  min_neighbors, haar_flags, min_size)

    # If faces are found
    if faces:

        for ((x, y, w, h), n) in faces:
            # the input to cv.HaarDetectObjects was resized, so scale the
            # bounding box of each face and convert it to two CvPoints
            pt1 = (int(x * image_scale), int(y * image_scale))
            pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
            cv2.Rectangle(image, pt1, pt2, cv2.RGB(255, 0, 0), 3, 8, 0)
Example #9
def find_leds(thresh_img):
    """
    Given a binary image showing the brightest pixels in an image,
    returns a result image displaying the found LEDs, each marked with a
    rectangle, together with their bounding regions.
    """
    contours = cv.FindContours(thresh_img,
                               cv.CreateMemStorage(),
                               mode=cv.CV_RETR_EXTERNAL,
                               method=cv.CV_CHAIN_APPROX_NONE,
                               offset=(0, 0))

    regions = []
    while contours:
        pts = [pt for pt in contours]
        x, y = zip(*pts)
        min_x, min_y = min(x), min(y)
        width, height = max(x) - min_x + 1, max(y) - min_y + 1
        regions.append((min_x, min_y, width, height))
        contours = contours.h_next()

    # draw the detected regions on a 3-channel output image
    out_img = cv.CreateImage(cv.GetSize(thresh_img), 8, 3)
    for x, y, width, height in regions:
        pt1 = x, y
        pt2 = x + width, y + height
        color = (0, 0, 255, 0)
        cv.Rectangle(out_img, pt1, pt2, color, 2)

    return out_img, regions
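Assuming a NumPy-based cv2 build, a rough modern equivalent of find_leds could rely on boundingRect instead of walking h_next; this is only a sketch, not the original implementation:

import cv2

def find_leds_cv2(thresh_img):
    """Sketch: bounding boxes of bright blobs in a binary image."""
    contours = cv2.findContours(thresh_img.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_NONE)[-2]
    regions = [cv2.boundingRect(c) for c in contours]  # (x, y, w, h) per blob
    out_img = cv2.cvtColor(thresh_img, cv2.COLOR_GRAY2BGR)  # colour image to draw on
    for x, y, w, h in regions:
        cv2.rectangle(out_img, (x, y), (x + w, y + h), (0, 0, 255), 2)
    return out_img, regions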
Example #10
def detect(image):
    image_faces = []
    bitmap = cv.fromarray(image)
    faces = cv.HaarDetectObjects(bitmap, cascade, cv.CreateMemStorage(0))
    if faces:
        for (x, y, w, h), n in faces:
            image_faces.append(image[y:(y + h), x:(x + w)])
            #cv2.rectangle(image,(x,y),(x+w,y+h),(255,255,255),3)
    return image_faces
Example #11
def detect_faces(image_path, min_face_size):
    cv_image = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
    o_height, o_width = cv_image.shape[:2]
    cv_image = cv2.equalizeHist(cv_image)
    cascade = cv2.CascadeClassifier(HAARCASCADE_PATH)
    # cv.HaarDetectObjects does not exist in cv2; detectMultiScale is the replacement
    faces = cascade.detectMultiScale(cv_image, scaleFactor=1.2, minNeighbors=2,
                                     flags=cv2.CASCADE_DO_CANNY_PRUNING,
                                     minSize=min_face_size)
    return faces
Example #12
def faces_from_pil_image(pil_image):
    "Return a list of (x,y,h,w) tuples for faces detected in the PIL image"
    storage = cv.CreateMemStorage(0)
    facial_features = cv.Load('haarcascade_frontalface_alt.xml',
                              storage=storage)
    cv_im = cv.CreateImageHeader(pil_image.size, cv.IPL_DEPTH_8U, 3)
    cv.SetData(cv_im, pil_image.tostring())
    faces = cv.HaarDetectObjects(cv_im, facial_features, storage)
    # faces includes a `neighbors` field that we aren't going to use here
    return [f[0] for f in faces]
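With current Pillow and OpenCV versions the CreateImageHeader/SetData round-trip is unnecessary; a hedged sketch of the same idea via a NumPy conversion (the cascade path is an assumption):

import numpy as np
import cv2

def faces_from_pil_image_cv2(pil_image, cascade_path='haarcascade_frontalface_alt.xml'):
    """Sketch: return (x, y, w, h) tuples for faces detected in a PIL image."""
    bgr = cv2.cvtColor(np.array(pil_image.convert('RGB')), cv2.COLOR_RGB2BGR)
    grey = cv2.cvtColor(bgr, cv2.COLOR_BGR2GRAY)
    cascade = cv2.CascadeClassifier(cascade_path)
    return [tuple(r) for r in cascade.detectMultiScale(grey)]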
Example #13
 def __init__(self, img0):
     self.thresh1 = 255
     self.thresh2 = 30
     self.level = 4
     self.storage = cv.CreateMemStorage()
     cv.NamedWindow("Source", 0)
     cv.ShowImage("Source", img0)
     cv.NamedWindow("Segmentation", 0)
     cv.CreateTrackbar("Thresh1", "Segmentation", self.thresh1, 255, self.set_thresh1)
     cv.CreateTrackbar("Thresh2", "Segmentation",  self.thresh2, 255, self.set_thresh2)
     self.image0 = cv.CloneImage(img0)
     self.image1 = cv.CloneImage(img0)
     cv.ShowImage("Segmentation", self.image1)
Example #14
def detect_object(image):
    '''Detect faces and return their bounding boxes as (x1, y1, x2, y2).'''
    grayscale = cv.CreateImage((image.width, image.height), 8, 1)
    cv.CvtColor(image, grayscale, cv.CV_BGR2GRAY)

    cascade = cv.Load("data/haarcascades/haarcascade_frontalface_alt.xml")
    rect = cv.HaarDetectObjects(grayscale, cascade, cv.CreateMemStorage(),
                                1.1, 2, cv.CV_HAAR_DO_CANNY_PRUNING,
                                (20, 20))

    result = []
    for r in rect:
        result.append((r[0][0], r[0][1], r[0][0] + r[0][2], r[0][1] + r[0][3]))

    return result
Example #15
	def __init__(self, width, height, cascade_file="haarcascade_frontalface_alt.xml"):
		"""
		Detects faces in an image.
		@param width        Width of the images that will be supplied
		@param height       Height of the images that will be supplied
		@param cascade_file Haar cascade data file for fronts of faces
		"""
		
		# Load the cascade
		self.cascade = cv.Load(cascade_file)
		
		# Storage for the algorithm to use
		self.storage = cv.CreateMemStorage()
		
		# A grayscale buffer to copy images for processing into
		self.gray = cv.CreateImage((width, height), 8, 1)
Example #16
def detect_object(image):
    '''Detect faces in the image and return their coordinates.'''
    grayscale = cv.CreateImage((image.width, image.height), 8, 1)
    cv.CvtColor(image, grayscale, cv.CV_BGR2GRAY)

    cascade = cv.Load(
        "/usr/local/opencv-2.4.9/data/haarcascades/haarcascade_frontalface_alt_tree.xml"
    )
    rect = cv.HaarDetectObjects(grayscale, cascade, cv.CreateMemStorage(), 1.1,
                                2, cv.CV_HAAR_DO_CANNY_PRUNING, (20, 20))

    result = []
    for r in rect:
        result.append((r[0][0], r[0][1], r[0][0] + r[0][2], r[0][1] + r[0][3]))

    return result
Example #17
def detect_object(image):
    '''Detect faces in the image and return their coordinates.'''
    # convert to a single-channel greyscale image
    grayscale = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    cascade = cv2.CascadeClassifier("C:/Users/polyv-1107/opencv-3.3.0/data/haarcascades/haarcascade_frontalface_alt_tree.xml")
    rects = cascade.detectMultiScale(grayscale, scaleFactor=1.1, minNeighbors=2,
                                     flags=cv2.CASCADE_DO_CANNY_PRUNING,
                                     minSize=(20, 20))

    result = []
    for (x, y, w, h) in rects:
        result.append((x, y, x + w, y + h))

    return result
Example #18
 def somethingHasMoved(self):
     # Find contours
     storage = cv.CreateMemStorage(0)
     contours = cv.FindContours(self.gray_frame, storage,
                                cv.CV_RETR_EXTERNAL,
                                cv.CV_CHAIN_APPROX_SIMPLE)
     self.currentcontours = contours  # Save contours
     while contours:  # For all contours compute the area
         self.currentsurface += cv.ContourArea(contours)
         contours = contours.h_next()
     # percentage of the frame area covered by moving contours
     avg = (self.currentsurface * 100) / self.surface
     self.currentsurface = 0  # reset the accumulated surface
     return avg > self.threshold
Example #19
def getPupil(frame):
	pupilImg = cv.CreateImage(cv.GetSize(frame), 8, 1)
	cv.InRangeS(frame, (30,30,30), (80,80,80), pupilImg)
	contours = cv.FindContours(pupilImg, cv.CreateMemStorage(0), mode = cv.CV_RETR_EXTERNAL)
	del pupilImg
	pupilImg = cv.CloneImage(frame)
	while contours:
		moments = cv.Moments(contours)
		area = cv.GetCentralMoment(moments,0,0)
		if (area > 50):
			pupilArea = area
			x = cv.GetSpatialMoment(moments,1,0)/area
			y = cv.GetSpatialMoment(moments,0,1)/area
			pupil = contours
			global centroid
			centroid = (int(x),int(y))
			cv.DrawContours(pupilImg, pupil, (0,0,0), (0,0,0), 2, cv.CV_FILLED)
			break
		contours = contours.h_next()
	return (pupilImg)
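The pupil centroid above comes from spatial moments; with the array-based API the same calculation might be sketched as below (the 30-80 intensity range and the area threshold of 50 are taken from the example, the rest is illustrative):

import cv2

def get_pupil_centroid(frame_bgr):
    """Sketch: centroid of the first sufficiently large blob in the 30-80 range."""
    mask = cv2.inRange(frame_bgr, (30, 30, 30), (80, 80, 80))
    contours = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)[-2]
    for cnt in contours:
        m = cv2.moments(cnt)
        if m['m00'] > 50:  # area threshold, as in the original
            return int(m['m10'] / m['m00']), int(m['m01'] / m['m00'])
    return None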
Example #20
    def detect_and_draw(self, img):

        # allocate temporary images
        gray = cv.CreateImage((img.width, img.height), 8, 1)
        small_img = cv.CreateImage((cv.Round(img.width / self.image_scale),
                                    cv.Round(img.height / self.image_scale)),
                                   8, 1)

        # convert color input image to grayscale
        cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

        # scale input image for faster processing
        cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)
        cv.EqualizeHist(small_img, small_img)

        face_found = False
        if self.cascade:
            t = cv.GetTickCount()
            faces = cv.HaarDetectObjects(small_img, self.cascade,
                                         cv.CreateMemStorage(0),
                                         self.haar_scale, self.min_neighbors,
                                         self.haar_flags, self.min_size)
            t = cv.GetTickCount() - t
            #		print "time taken for detection = %gms" % (t/(cv.GetTickFrequency()*1000.))
            if faces:
                face_found = True

                for ((x, y, w, h), n) in faces:
                    # the input to cv.HaarDetectObjects was resized, so scale the
                    # bounding box of each face and convert it to two CvPoints
                    pt1 = (int(x * self.image_scale),
                           int(y * self.image_scale))
                    pt2 = (int((x + w) * self.image_scale),
                           int((y + h) * self.image_scale))
                    cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)
            else:
                face_found = False

        cv.ShowImage("video", img)
        return face_found
Example #21
def showNaoImage(IP, PORT, camID):  # parameters: IP, PORT and camera ID (top or bottom camera)

    # connect to the NAO camera
    camProxy = ALProxy("ALVideoDevice", IP, PORT)

    resolution = 2  # VGA
    colorSpace = 11  # RGB
    videoClient = camProxy.subscribe("python_client", resolution, colorSpace,
                                     5)  # resolution, frame rate and colour space

    t0 = time.time()
    camProxy.setParam(18, camID)  # select the camera

    naoImage = camProxy.getImageRemote(videoClient)  # store the grabbed image in naoImage
    t1 = time.time()

    camProxy.unsubscribe(videoClient)
    imageWidth = naoImage[0]
    imageHeight = naoImage[1]
    imagechannls = naoImage[2]  # naoImage[6] is the image data

    frameArray = numpy.frombuffer(naoImage[6], dtype=numpy.uint8).reshape(
        imageHeight, imageWidth, imagechannls)
    cimg = cv2.cvtColor(frameArray, cv2.COLOR_BGR2HSV)
    gray = cv2.cvtColor(frameArray, cv2.COLOR_BGR2GRAY)  # convert to greyscale
    blurred = cv2.GaussianBlur(gray, (5, 5), 0)  # Gaussian smoothing to reduce noise
    thresh = cv2.threshold(blurred, 60, 255, cv2.THRESH_BINARY)[1]  # binary image
    # im_cv = numpy.zeros((imageHeight, imageWidth, 3), numpy.uint8)  # initialise the image im_cv
    #
    # im_cv.data = array  # copy the image grabbed from the camera into im_cv (as a mat)
    #
    # # convert the colour space from BGR to RGB
    # b, g, r = cv2.split(im_cv)
    # img1 = cv2.merge([r, g, b])
    # # convert mat to cvmat
    # img3 = cv2.fromarray(img1)
    # cv2.SaveImage("test22.bmp", img3)
    # # convert the colour space to HSV
    # imgHSV = cv2.CreateImage(cv2.GetSize(img3), 8, 3)
    # cv2.CvtColor(img3, imgHSV, cv2.CV_RGB2HSV)
    #
    # cimg, cimg_c = hsvProceed(imgHSV, camID)  # call hsvProceed to get a binary image
    # fit the smallest bounding rectangle around each candidate region
    cnts = cv2.findContours(thresh, cv2.RETR_LIST,
                            cv2.CHAIN_APPROX_SIMPLE)[-2]
    Area = 0
    left_right = 0
    up_down = 0
    # different filter thresholds for the two cameras
    if camID == 0:
        areamax = 2500
        areamin = 40
        valuemin = 25
        value_w = 641
        valuemax = 481
    else:
        areamax = 5000
        areamin = 400
        valuemin = 0
        value_w = 500
        valuemax = 400

    for cnt in cnts:
        rect = cv2.boundingRect(cnt)  # bounding rectangle of one connected component
        area = rect[2] * rect[3]  # area of the bounding rectangle
        # centre of the bounding rectangle
        rect_center_x = rect[0] + rect[2] / 2
        rect_center_y = rect[1] + rect[3] / 2
        # the radio function from choose0 could be used to keep only circular regions
        # radio_c = choose0.radio(cimg_c, rect)

        radio = float(rect[2]) / rect[3]  # aspect ratio of the rectangle
        # the following conditions filter the candidate rectangles
        if (valuemin <= rect[1] <= valuemax and rect[0] <= value_w
                and areamin < area < areamax and 0.6 < radio < 1.6):
            # if radio_c == 1:
            cv2.drawContours(frameArray, [cnt], -1, (255, 255, 0), 1)  # draw the contour
            cv2.rectangle(frameArray, (rect[0], rect[1]),
                          (rect[0] + rect[2], rect[1] + rect[3]),
                          (0, 0, 255), 1)  # draw the bounding rectangle

            # area and position (relative to the image centre) of the accepted rectangle
            Area = rect[2] * rect[3]
            left_right = rect_center_x - imageWidth / 2
            up_down = rect_center_y - imageHeight / 2

    return Area, left_right, up_down  # return the ball's area and its position in the image
Example #22
#BuildingES.py
#!/usr/bin/python

import cv2  #import the openCV lib to python
import serial  #import the pyserial module

#Module -1: Image Processing
hc = cv2.CascadeClassifier(
    '/home/george/PycharmProjects/Embeded image processing system/haarcascade_frontalface_alt2.xml'
)
img = cv2.imread('/home/jayneil/beautiful-faces.jpg', 0)
faces = hc.detectMultiScale(img)
print(faces)
for (x, y, w, h) in faces:
    cv2.rectangle(img, (x, y), (x + w, y + h), 255)
cv2.imwrite("faces_detected.jpg", img)
dst = cv2.imread('faces_detected.jpg')
cv2.namedWindow('Face Detected', cv2.WINDOW_AUTOSIZE)
cv2.imshow('Face Detected', dst)
cv2.waitKey(5000)
cv2.destroyWindow('Face Detected')

#Module -2: Trigger Pyserial
if len(faces) == 0:

    ser = serial.Serial('/dev/ttyUSB0', 9600)
    print(ser)
    ser.write('N')
else:
Example #23
im = cv.LoadImage('D:/1.jpg', cv.CV_LOAD_IMAGE_GRAYSCALE)

pi = math.pi  #Pi value

dst = cv.CreateImage(cv.GetSize(im), 8, 1)

cv.Canny(im, dst, 200, 200)
cv.Threshold(dst, dst, 100, 255, cv.CV_THRESH_BINARY)

#---- Standard ----
color_dst_standard = cv.CreateImage(cv.GetSize(im), 8, 3)
cv.CvtColor(im, color_dst_standard,
            cv.CV_GRAY2BGR)  #Create output image in RGB to put red lines

lines = cv.HoughLines2(dst, cv.CreateMemStorage(0), cv.CV_HOUGH_STANDARD, 1,
                       pi / 180, 100, 0, 0)
for (rho, theta) in lines[:100]:
    a = math.cos(theta)  #Calculate orientation in order to print them
    b = math.sin(theta)
    x0 = a * rho
    y0 = b * rho
    pt1 = (cv.Round(x0 + 1000 * (-b)), cv.Round(y0 + 1000 * (a)))
    pt2 = (cv.Round(x0 - 1000 * (-b)), cv.Round(y0 - 1000 * (a)))
    cv.Line(color_dst_standard, pt1, pt2, cv.CV_RGB(255, 0, 0), 2,
            4)  #Draw the line

#---- Probabilistic ----
color_dst_proba = cv.CreateImage(cv.GetSize(im), 8, 3)
cv.CvtColor(im, color_dst_proba, cv.CV_GRAY2BGR)  # idem
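A hedged sketch of the standard Hough part with the modern cv2 functions; the Canny and accumulator thresholds mirror the values above, everything else is illustrative:

import math
import cv2

def draw_standard_hough_lines(path='D:/1.jpg'):
    """Sketch: Canny edges followed by the standard Hough transform."""
    im = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
    dst = cv2.Canny(im, 200, 200)
    color_dst = cv2.cvtColor(im, cv2.COLOR_GRAY2BGR)  # colour image for red lines
    lines = cv2.HoughLines(dst, 1, math.pi / 180, 100)
    for rho, theta in (lines[:100, 0] if lines is not None else []):
        a, b = math.cos(theta), math.sin(theta)
        x0, y0 = a * rho, b * rho
        pt1 = (int(x0 + 1000 * (-b)), int(y0 + 1000 * a))
        pt2 = (int(x0 - 1000 * (-b)), int(y0 - 1000 * a))
        cv2.line(color_dst, pt1, pt2, (0, 0, 255), 2)
    return color_dst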
Example #24
    # convert color input image to grayscale
    cv.CvtColor(frame, gray, cv.CV_BGR2GRAY)

    # scale input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)

    cv.EqualizeHist(small_img, small_img)

    midFace = None

    if (cascade):
        t = cv.GetTickCount()
        # HaarDetectObjects takes 0.02s
        faces = cv.HaarDetectObjects(small_img, cascade,
                                     cv.CreateMemStorage(0), haar_scale,
                                     min_neighbors, haar_flags, min_size)
        t = cv.GetTickCount() - t
        if faces:
            lights(50 if len(faces) == 0 else 0, 50 if len(faces) > 0 else 0,
                   0, 50)

            for ((x, y, w, h), n) in faces:
                # the input to cv.HaarDetectObjects was resized, so scale the
                # bounding box of each face and convert it to two CvPoints
                pt1 = (int(x * image_scale), int(y * image_scale))
                pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
                cv.Rectangle(frame, pt1, pt2, cv.RGB(100, 220, 255), 1, 8, 0)
                # get the xy corner co-ords, calc the midFace location
                x1 = pt1[0]
                x2 = pt2[0]
Example #25
    def detectFace(self, cam_img, faceCascade, eyeCascade, mouthCascade):  # cam_img should be a cv2.VideoCapture
        min_size = (20, 20)
        image_scale = 2
        haar_scale = 1.2
        min_neighbors = 2
        haar_flags = 0
        image_width = int(cam_img.get(cv.CV_CAP_PROP_FRAME_WIDTH))
        image_height = int(cam_img.get(cv.CV_CAP_PROP_FRAME_HEIGHT))
        # Allocate the temporary images
        gray = cv.CreateImage((image_width, image_height), 8, 1)  # tuple as the first arg
        smallImage = cv.CreateImage((cv.Round(image_width / image_scale), cv.Round(image_height / image_scale)), 8, 1)

        (ok, img) = cam_img.read()
        # print 'gray is of ',type(gray) >>> gray is of  <type 'cv2.cv.iplimage'>
        # print type(smallImage)  >>> <type 'cv2.cv.iplimage'>
        # print type(image) >>> <type 'cv2.VideoCapture'>
        # print type(img) >>> <type 'numpy.ndarray'>

        # convert numpy.ndarray to iplimage
        ipl_img = cv2.cv.CreateImageHeader((img.shape[1], img.shape[0]), cv.IPL_DEPTH_8U, 3)
        cv2.cv.SetData(ipl_img, img.tostring(), img.dtype.itemsize * 3 * img.shape[1])

        # Convert color input image to grayscale
        cv.CvtColor(ipl_img, gray, cv.CV_BGR2GRAY)

        # Scale input image for faster processing
        cv.Resize(gray, smallImage, cv.CV_INTER_LINEAR)

        # Equalize the histogram
        cv.EqualizeHist(smallImage, smallImage)

        # Detect the faces
        faces = cv.HaarDetectObjects(smallImage, faceCascade, cv.CreateMemStorage(0),
                                     haar_scale, min_neighbors, haar_flags, min_size)
        # => The function returns a list of tuples, (rect, neighbors) , where rect is a CvRect specifying the object’s extents and neighbors is a number of neighbors.
        # => CvRect cvRect(int x, int y, int width, int height)
        # If faces are found
        if faces:
            face = faces[0]
            self.faceX = face[0][0]
            self.faceY = face[0][1]

            for ((x, y, w, h), n) in faces:
                # the input to cv.HaarDetectObjects was resized, so scale the
                # bounding box of each face and convert it to two CvPoints
                pt1 = (int(x * image_scale), int(y * image_scale))
                pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
                cv.Rectangle(ipl_img, pt1, pt2, cv.RGB(0, 0, 255), 3, 8, 0)
                # face_region = cv.GetSubRect(ipl_img,(x,int(y + (h/4)),w,int(h/2)))

            cv.SetImageROI(ipl_img, (pt1[0],
                                     pt1[1],
                                     pt2[0] - pt1[0],
                                     int((pt2[1] - pt1[1]) * 0.7)))

            eyes = cv.HaarDetectObjects(ipl_img, eyeCascade,
                                        cv.CreateMemStorage(0),
                                        haar_scale, min_neighbors,
                                        haar_flags, (15, 15))

            if eyes:
                # For each eye found
                for eye in eyes:
                    # Draw a rectangle around the eye
                    cv.Rectangle(ipl_img,  # image
                                 (eye[0][0],  # vertex pt1
                                  eye[0][1]),
                                 (eye[0][0] + eye[0][2],  # vertex pt2 opposite to pt1
                                  eye[0][1] + eye[0][3]),
                                 cv.RGB(255, 0, 0), 1, 4, 0)  # color,thickness,lineType(8,4,cv.CV_AA),shift

        cv.ResetImageROI(ipl_img)

        return ipl_img
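The same face-then-eyes strategy (searching for eyes only in the upper part of each detected face) can be sketched with the modern API roughly as follows; face_cascade and eye_cascade are assumed to be cv2.CascadeClassifier instances:

import cv2

def detect_face_and_eyes(frame, face_cascade, eye_cascade):
    """Sketch: detect faces, then look for eyes only inside each face region."""
    grey = cv2.equalizeHist(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY))
    for (x, y, w, h) in face_cascade.detectMultiScale(grey, 1.2, 2, minSize=(20, 20)):
        cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 3)
        roi = grey[y:y + int(h * 0.7), x:x + w]  # roughly the upper 70% of the face, as above
        for (ex, ey, ew, eh) in eye_cascade.detectMultiScale(roi, 1.2, 2, minSize=(15, 15)):
            cv2.rectangle(frame, (x + ex, y + ey),
                          (x + ex + ew, y + ey + eh), (0, 0, 255), 1)
    return frame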
Example #26
RIGHT = 3
UP = 4
DOWN = 5

#Color constant definitions
RED = cv.RGB(255, 0, 0)
GREEN = cv.RGB(0, 220, 0)
BLUE = cv.RGB(0, 0, 255)
YELLOW = cv.RGB(255, 255, 0)
ORANGE = cv.RGB(255, 127, 0)
MAGENTA = cv.RGB(255, 0, 255)

# other constants
scale = 1
cascade = None
storage = cv.CreateMemStorage(0)
cascade_name = "xml/haarcascade_frontalface_alt.xml"
min_size = (FACE_MIN_SIZE, FACE_MIN_SIZE)
image_scale = 1.3
haar_scale = 1.2
min_neighbors = 2
haar_flags = cv.CV_HAAR_DO_CANNY_PRUNING
age = 0
trackedFaces = []
IPL_DEPTH_8U = 8
gray = 0
small_img = 0
osName = os.name
fname_temp = ""

### end of Face detection constants
Example #27
orig = cv.imread('lena.png')

# Convert to greyscale; keep the original in colour to draw contours at the end
im = cv.cvtColor(orig, cv.COLOR_BGR2GRAY)

_, im = cv.threshold(im, 128, 255, cv.THRESH_BINARY)
cv.imshow("Threshold 1", im)

element = cv.getStructuringElement(cv.MORPH_RECT, (5 * 2 + 1, 5 * 2 + 1), (5, 5))

# Open and close to make the contours appear
im = cv.morphologyEx(im, cv.MORPH_OPEN, element)
im = cv.morphologyEx(im, cv.MORPH_CLOSE, element)
_, im = cv.threshold(im, 128, 255, cv.THRESH_BINARY_INV)
cv.imshow("After MorphologyEx", im)
# --------------------------------

vals = im.copy()  # Make a copy because findContours can modify the image
contours = cv.findContours(vals, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)[-2]

_red = (0, 0, 255)  # Red for the contours
cv.drawContours(orig, contours, -1, _red, 2)  # Draw all contours on the colour image

cv.imshow("Image", orig)
cv.waitKey(0)

Example #28
    def run(self):
        # Capture first frame to get size
        frame = cv.QueryFrame(self.capture)
        frame_size = cv.GetSize(frame)

        width = frame.width
        height = frame.height
        surface = width * height  #Surface area of the image
        cursurface = 0  #Hold the current surface that have changed

        grey_image = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)
        moving_average = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_32F, 3)
        difference = None

        while True:
            color_image = cv.QueryFrame(self.capture)

            cv.Smooth(color_image, color_image, cv.CV_GAUSSIAN, 3,
                      0)  #Remove false positives

            if not difference:  #For the first time put values in difference, temp and moving_average
                difference = cv.CloneImage(color_image)
                temp = cv.CloneImage(color_image)
                cv.ConvertScale(color_image, moving_average, 1.0, 0.0)
            else:
                cv.RunningAvg(color_image, moving_average, 0.020,
                              None)  #Compute the average

            # Convert the scale of the moving average.
            cv.ConvertScale(moving_average, temp, 1.0, 0.0)

            # Minus the current frame from the moving average.
            cv.AbsDiff(color_image, temp, difference)

            #Convert the image so that it can be thresholded
            cv.CvtColor(difference, grey_image, cv.CV_RGB2GRAY)
            cv.Threshold(grey_image, grey_image, 70, 255, cv.CV_THRESH_BINARY)

            cv.Dilate(grey_image, grey_image, None, 18)  #to get object blobs
            cv.Erode(grey_image, grey_image, None, 10)

            # Find contours
            storage = cv.CreateMemStorage(0)
            contours = cv.FindContours(grey_image, storage,
                                       cv.CV_RETR_EXTERNAL,
                                       cv.CV_CHAIN_APPROX_SIMPLE)

            backcontours = contours  #Save contours

            while contours:  #For all contours compute the area
                cursurface += cv.ContourArea(contours)
                contours = contours.h_next()

            # percentage of the frame area covered by moving contours
            avg = (cursurface * 100) / surface
            if avg > self.ceil:
                print("Something is moving !")
            #print avg,"%"
            cursurface = 0  #Put back the current surface to 0

            #Draw the contours on the image
            _red = (0, 0, 255)  # Red for external contours
            _green = (0, 255, 0)  # Green for internal contours
            levels = 1  # 1: external contours drawn, 2: internal contours as well, ...
            cv.DrawContours(color_image, backcontours, _red, _green, levels, 2,
                            cv.CV_FILLED)

            cv.ShowImage("Target", color_image)

            # Listen for ESC or ENTER key
            c = cv.WaitKey(7) % 0x100
            if c == 27 or c == 10:
                break
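For reference, a compact sketch of the same running-average motion detector with array-based cv2 calls; the 0.020 learning rate and the threshold of 70 come from the loop above, the capture argument is assumed to be a cv2.VideoCapture and the rest is illustrative:

import cv2
import numpy as np

def motion_ratio(capture, ceil_percent=10):
    """Sketch: warn when moving contours cover more than ceil_percent of the frame."""
    moving_average = None
    while True:
        ok, frame = capture.read()
        if not ok:
            break
        frame = cv2.GaussianBlur(frame, (3, 3), 0)  # remove false positives
        if moving_average is None:
            moving_average = np.float32(frame)
        cv2.accumulateWeighted(frame, moving_average, 0.020)  # compute the running average
        diff = cv2.absdiff(frame, cv2.convertScaleAbs(moving_average))
        grey = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
        _, grey = cv2.threshold(grey, 70, 255, cv2.THRESH_BINARY)
        grey = cv2.dilate(grey, None, iterations=18)  # get object blobs
        grey = cv2.erode(grey, None, iterations=10)
        contours = cv2.findContours(grey, cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_SIMPLE)[-2]
        moved = sum(cv2.contourArea(c) for c in contours)
        avg = moved * 100 / (frame.shape[0] * frame.shape[1])
        if avg > ceil_percent:
            print("Something is moving !")
        if (cv2.waitKey(7) & 0xFF) in (27, 10):  # ESC or ENTER
            break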
Example #29
    # Load camera source then start
    cam = camera.Camera('/dev/video0', SCREEN)
    cam.start()

    while 1:  # Ze loop

        time.sleep(1.0 / 120)  # throttle the loop to roughly 120 iterations per second

        image = cam.get_image()  # Get current webcam image

        cv2_image = pygame_to_cv2image(
            image)  # Create cv2 image from pygame image

        # Detect faces then draw points on image
        # FIXME: Current bottleneck. Image has to be Grayscale to make it faster.
        #        One solution would be to use opencv2 instead of pygame for
        #        capturing images.
        storage = cv.CreateMemStorage(-1)  # Create storage (legacy cv API)
        #points = detect_eyes(cv2_image, storage) + \
        #        detect_nose(cv2_image, storage) + \
        #        detect_mouth(cv2_image, storage)
        points = detect_faces(cv2_image, storage)  # Get points of faces.
        cv2_image = draw_from_points(cv2_image, points)  # Draw points

        screen.fill([0, 0, 0])  # Blank fill the screen

        screen.blit(cv2image_to_pygame(cv2_image),
                    (0, 0))  # Load new image on screen

        pygame.display.update()  # Update pygame display
Example #30
import cv2.cv as cv  # this script uses the legacy cv API

img = cv.LoadImage("friend1.jpg")

image_size = cv.GetSize(img)  # get the image size
greyscale = cv.CreateImage(image_size, 8, 1)  # create a greyscale image of the same size
cv.CvtColor(img, greyscale, cv.CV_BGR2GRAY)  # convert the colour image to greyscale
storage = cv.CreateMemStorage(0)  # memory storage used by the face detector

cv.EqualizeHist(greyscale, greyscale)  # equalise the histogram to speed up detection
# detect objects
cascade = cv.Load('haarcascade_frontalface_alt2.xml')  # load the pre-trained Haar cascade

# detect the faces in the image and return the face data in `faces`
faces = cv.HaarDetectObjects(greyscale, cascade, storage, 1.2, 2,
                             cv.CV_HAAR_DO_CANNY_PRUNING, (50, 50))

# extract the location of each face
j = 0  # face counter
for (x, y, w, h), n in faces:
    j += 1
    cv.SetImageROI(img, (x, y, w, h))  # set the ROI to the face region
    cv.SaveImage("face" + str(j) + ".jpg", img)  # save the cropped face
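Closing with a hedged modern-API sketch of the same crop-and-save loop; the output file names follow the example, while the function name and cascade path are assumptions:

import cv2

def save_face_crops(image_path="friend1.jpg",
                    cascade_path="haarcascade_frontalface_alt2.xml"):
    """Sketch: detect faces and write each crop to face<N>.jpg."""
    img = cv2.imread(image_path)
    grey = cv2.equalizeHist(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY))
    cascade = cv2.CascadeClassifier(cascade_path)
    faces = cascade.detectMultiScale(grey, 1.2, 2, minSize=(50, 50))
    for j, (x, y, w, h) in enumerate(faces, start=1):
        # NumPy slicing replaces SetImageROI
        cv2.imwrite("face" + str(j) + ".jpg", img[y:y + h, x:x + w])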