Example #1
    def detect_face(self, image):
        min_size = (20, 20)
        image_scale = 2
        haar_scale = 1.1
        min_neighbors = 2
        haar_flags = 0

        # Allocate the temporary images
        gray = cv.CreateImage((image.width, image.height), 8, 1)
        smallImage = cv.CreateImage((cv.Round(
            image.width / image_scale), cv.Round(image.height / image_scale)),
                                    8, 1)

        # Convert color input image to grayscale
        cv.CvtColor(image, gray, cv.CV_BGR2GRAY)

        # Scale input image for faster processing
        cv.Resize(gray, smallImage, cv.CV_INTER_LINEAR)

        # Equalize the histogram
        cv.EqualizeHist(smallImage, smallImage)

        # Detect the faces
        faces = cv.HaarDetectObjects(smallImage, self.cascade,
                                     cv.CreateMemStorage(0), haar_scale,
                                     min_neighbors, haar_flags, min_size)

        return faces
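The examples in this collection use the legacy `cv` module (the OpenCV 1.x C-API bindings, removed in OpenCV 3). For reference only, a minimal sketch of the same detection pipeline with the modern `cv2` API might look like the following; the cascade path and the fixed downscale factor of 2 are assumptions, not part of the example above:

import cv2

def detect_face_cv2(image, cascade_path="haarcascade_frontalface_default.xml"):
    # load the cascade (path is an assumption; adjust to your installation)
    cascade = cv2.CascadeClassifier(cascade_path)

    # grayscale, downscale by 2 for speed, then equalize the histogram
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    small = cv2.resize(gray, (gray.shape[1] // 2, gray.shape[0] // 2),
                       interpolation=cv2.INTER_LINEAR)
    small = cv2.equalizeHist(small)

    # detect faces; coordinates are relative to the downscaled image
    return cascade.detectMultiScale(small, scaleFactor=1.1, minNeighbors=2,
                                    minSize=(20, 20))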
Example #2
 def show_img(self):
     global face_rect
     global cam
     # loop forever, displaying frames continuously
     while True:
         img = cv.QueryFrame(cam)  # grab one frame from the video
         # three-channel copy of the frame
         src = cv.CreateImage((img.width, img.height), 8, 3)
         cv.Resize(img, src, cv.CV_INTER_LINEAR)
         # grayscale copy
         gray = cv.CreateImage((img.width, img.height), 8, 1)
         cv.CvtColor(img, gray, cv.CV_BGR2GRAY)  # convert the BGR frame to grayscale
         cv.EqualizeHist(gray, gray)  # equalize the grayscale histogram
         rects = detect(gray, cascade)  # pass the frame and the classifier; returns the position and size of any detected faces
         face_rect = rects
         # draw the green rectangles around the faces
         draw_rects(src, rects, (0, 255, 0))
         # show the frame with the face rectangles
         cv.ShowImage('DeepFace ZhangLi', src)
         #path = 'C:/Users/ZhangLi/Desktop/demo.jpg'
         #showImage = QtGui.QImage(img.data, img.shape[1], img.shape[0], QtGui.QImage.Format_RGB888)
         #self.show_video.setPixmap(QtGui.QPixmap.fromImage(showImage))
         #png = QPixmap(path)
         #self.show_video.setPixmap(png)
         if cv2.waitKey(5) == 27:  # exit the loop when ESC is pressed
             break
     cv2.destroyAllWindows()
Example #3
    def normalize(self, image):

        #Checks whether inputs are correct
        if self.image_check(image) < 0:
            return -1

        # converting the image to grayscale

        gsimage = cv.CreateImage(cv.GetSize(image), cv.IPL_DEPTH_8U, 1)
        newgsimage = cv.CreateImage(cv.GetSize(image), cv.IPL_DEPTH_8U, 1)
        cv.CvtColor(image, gsimage, cv.CV_RGB2GRAY)
        cv.EqualizeHist(gsimage, newgsimage)

        if self.visualize:
            while True:
                cv.NamedWindow("Normal")
                cv.ShowImage("Normal", gsimage)
                cv.WaitKey(5)
                cv.NamedWindow("Histogram Equalized")
                cv.ShowImage("Histogram Equalized", newgsimage)
                if cv.WaitKey(5) == 1048603:
                    break
            cv.DestroyAllWindows()

        return newgsimage
Example #4
def detect_and_draw(img, face_cascade):
    gray = cv.CreateImage((img.width, img.height), 8, 1)
    image_scale = img.width / smallwidth

    small_img = cv.CreateImage((cv.Round(
        img.width / image_scale), cv.Round(img.height / image_scale)), 8, 1)

    # convert color input image to grayscale
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

    # scale input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)

    cv.EqualizeHist(small_img, small_img)

    faces = cv.HaarDetectObjects(small_img, face_cascade,
                                 cv.CreateMemStorage(0), haar_scale,
                                 min_neighbors, haar_flags, min_size)

    if opencv_preview and faces:
        for ((x, y, w, h), n) in faces:
            # the input to cv.HaarDetectObjects was resized, so scale the
            # bounding box of each face and convert it to two CvPoints
            pt1 = (int(x * image_scale), int(y * image_scale))
            pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
            cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)
            if verbose:
                print "Face at: ", pt1[0], ",", pt2[0], "\t", pt1[1], ",", pt2[
                    1]

    return True if faces else False
Example #5
def detect_and_draw(img, cascade):
    # allocate temporary images
    gray = cv.CreateImage((img.width, img.height), 8, 1)
    small_img = cv.CreateImage((cv.Round(img.width / image_scale),
                                cv.Round(img.height / image_scale)), 8, 1)

    # convert color input image to grayscale
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

    # scale input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)

    cv.EqualizeHist(small_img, small_img)

    if(cascade):
        t = cv.GetTickCount()
        faces = cv.HaarDetectObjects(small_img, cascade, cv.CreateMemStorage(0),
            haar_scale, min_neighbors, haar_flags, min_size)
        t = cv.GetTickCount() - t
        print "detection time = %gms" % (t / (cv.GetTickFrequency() * 1000.))
        if faces:
            for ((x, y, w, h), n) in faces:
                # the input to cv.HaarDetectObjects was resized, so scale the 
                # bounding box of each face and convert it to two CvPoints
                pt1 = (int(x * image_scale), int(y * image_scale))
                pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
                cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)

    cv.ShowImage("result", img)
Example #6
def detectFace(img, cascade):
    # allocate temporary images
    gray = cv.CreateImage((img.width, img.height), 8, 1)
    small_img = cv.CreateImage(
        (cv.Round(img.width / imageScale), cv.Round(img.height / imageScale)),
        8, 1)
    # convert color input image to grayscale
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)
    # scale input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)
    cv.EqualizeHist(small_img, small_img)
    faces = cv.HaarDetectObjects(small_img, cascade, cv.CreateMemStorage(0),
                                 haarScale, minNeighbors, haarFlags, minSize)

    if faces:
        print "\tDetected ", len(faces), " object(s)"
        for ((x, y, w, h), n) in faces:
            #the input to cv.HaarDetectObjects was resized, scale the
            #bounding box of each face and convert it to two CvPoints
            pt1 = (int(x * imageScale), int(y * imageScale))
            pt2 = (int((x + w) * imageScale), int((y + h) * imageScale))
            cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)
        return img

    else:
        return False
Example #7
def capture():
    """
        Use the Intel-trained Haar cascade to detect faces in the images under
        the "origin" directory and save each face crop to "captured/".
        Most of the work is done by OpenCV's built-in functions.
    """
    j = 0
    g = os.walk("origin")
    for path, d, filelist in g:
        for filename in filelist:
            img = cv.LoadImage(os.path.join(path, filename))
            image_size = cv.GetSize(img)
            greyscale = cv.CreateImage(image_size, 8, 1)
            cv.CvtColor(img, greyscale, cv.CV_BGR2GRAY)
            storage = cv.CreateMemStorage(0)

            cv.EqualizeHist(greyscale, greyscale)
            cascade = cv.Load('haarcascade_frontalface_alt2.xml')

            faces = cv.HaarDetectObjects(greyscale, cascade, storage, 1.2, 2,
                                         cv.CV_HAAR_DO_CANNY_PRUNING, (50, 50))

            for (x, y, w, h), n in faces:
                j += 1
                cv.SetImageROI(img, (x, y, w, h))
                cv.SaveImage("captured/face" + str(j) + ".png", img)
Example #8
def DetectFace(image, faceCascade, returnImage=False):
    # This function takes a grayscale cv image and finds
    # the patterns defined in the Haar cascade passed in as faceCascade
    # modified from: http://www.lucaamore.com/?p=638

    #variables
    min_size = (30, 30)
    #image_scale = 2
    haar_scale = 1.1
    min_neighbors = 2
    haar_flags = 0

    # Equalize the histogram
    cv.EqualizeHist(image, image)

    # Detect the faces
    faces = cv.HaarDetectObjects(image, faceCascade, cv.CreateMemStorage(0),
                                 haar_scale, min_neighbors, haar_flags,
                                 min_size)

    # If faces are found
    if faces and returnImage:
        for ((x, y, w, h), n) in faces:
            # Convert bounding box to two CvPoints
            pt1 = (int(x), int(y))
            pt2 = (int(x + w), int(y + h))
            cv.Rectangle(image, pt1, pt2, cv.RGB(255, 0, 0), 5, 8, 0)

    if returnImage:
        return image
    else:
        return faces
Example #9
    def detect_and_draw(self, img, cascade, camera_position=0):
        min_size = (20, 20)
        image_scale = self.horizontalSlider_3.value()
        haar_scale = 1.2
        min_neighbors = 2
        haar_flags = 0
        # allocate temporary images
        gray = cv.CreateImage((img.width, img.height), 8, 1)
        small_img_height = cv.Round(img.height / image_scale)
        small_img = cv.CreateImage(
            (cv.Round(img.width / image_scale), small_img_height), 8, 1)
        # convert color input image to grayscale
        cv.CvtColor(img, gray, cv.CV_BGR2GRAY)
        # scale input image for faster processing
        cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)
        cv.EqualizeHist(small_img, small_img)

        faces = cv.HaarDetectObjects(small_img, cascade,
                                     cv.CreateMemStorage(0), haar_scale,
                                     min_neighbors, haar_flags, min_size)
        if faces:
            for ((x, y, w, h), n) in faces:
                if self.face_cert < n:
                    x2, y2, w2, h2 = self.make_the_rectangle_bigger(
                        x, y, w, h, 1.22, small_img_height, image_scale)
                    self.create_person_and_add_to_room(img, (x2, y2, w2, h2),
                                                       camera_position)
                    if self.mark_detected_objects[camera_position]:
                        pt2 = (int(x2 + w2), int(y2 + h2))
                        cv.Rectangle(img, (x2, y2), pt2, cv.RGB(255, 0, 0), 3,
                                     8, 0)
        if self.show_main_view[camera_position]:
            cv.ShowImage("result" + str(camera_position), img)
    def Magnitude(self, dx, dy, Mask=None, precise=True, method="cv"):
        '''Calculates the gradient magnitude using either a precise or a fast approach'''

        dxconv = cv.CreateImage(cv.GetSize(dx), cv.IPL_DEPTH_32F, dx.channels)
        dyconv = cv.CreateImage(cv.GetSize(dx), cv.IPL_DEPTH_32F, dx.channels)
        dxdest = cv.CreateImage(cv.GetSize(dx), cv.IPL_DEPTH_32F, dx.channels)
        dydest = cv.CreateImage(cv.GetSize(dx), cv.IPL_DEPTH_32F, dx.channels)
        magdest = cv.CreateImage(cv.GetSize(dx), cv.IPL_DEPTH_32F, dx.channels)
        magnitude = cv.CreateImage(cv.GetSize(dx), cv.IPL_DEPTH_32F,
                                   dx.channels)
        magnitudetemp = cv.CreateImage(cv.GetSize(dx), cv.IPL_DEPTH_32F,
                                       dx.channels)
        zero = cv.CreateImage(cv.GetSize(dx), cv.IPL_DEPTH_32F, dx.channels)

        cv.Convert(dx, dxconv)
        cv.Convert(dy, dyconv)

        if precise:
            cv.Pow(dxconv, dxdest, 2)
            cv.Pow(dyconv, dydest, 2)
            cv.Add(dxdest, dydest, magdest)
            cv.Pow(magdest, magnitude, 1. / 2)
        else:
            # fast approximation (|dx| + |dy|) is not implemented
            return None

        if method == "slow":
            size = cv.GetSize(magnitude)

            for x in range(size[0]):
                for y in range(size[1]):
                    if Mask is None:
                        pass
                    elif Mask[y, x] > 0:
                        pass
                    else:
                        magnitude[y, x] = 0

            final = cv.CreateImage(cv.GetSize(dx), cv.IPL_DEPTH_8U,
                                   dx.channels)
            cv.ConvertScaleAbs(magnitude, final)
        else:
            cv.Add(zero, magnitude, magnitudetemp, Mask)
            final = cv.CreateImage(cv.GetSize(dx), cv.IPL_DEPTH_8U,
                                   dx.channels)
            cv.ConvertScaleAbs(magnitudetemp, final)

        if self.visualize:
            magnitude2 = cv.CreateImage(cv.GetSize(dy), cv.IPL_DEPTH_8U, 1)
            cv.EqualizeHist(final, magnitude2)
            while True:
                cv.NamedWindow("Magnitude")
                cv.ShowImage("Magnitude", magnitude2)
                c = cv.WaitKey(5)
                if c > 0:
                    break
        cv.DestroyAllWindows()

        return final
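For what it's worth, the precise branch of Magnitude (sqrt(dx² + dy²), optionally masked) collapses to a few cv2/NumPy calls. A minimal sketch, not a drop-in replacement for the class method above:

import cv2
import numpy as np

def magnitude_cv2(dx, dy, mask=None):
    # dx, dy: gradient images (e.g. from cv2.Sobel), any numeric dtype
    mag = cv2.magnitude(dx.astype(np.float32), dy.astype(np.float32))
    final = cv2.convertScaleAbs(mag)  # back to 8-bit, like ConvertScaleAbs above
    if mask is not None:
        final = cv2.bitwise_and(final, final, mask=mask)  # zero pixels outside the mask
    return final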
Example #11
def detect_and_draw(img, cascade, jpg_cnt):
    # allocate temporary images
    gray = cv.CreateImage((img.width, img.height), 8, 1)
    small_img = cv.CreateImage((cv.Round(
        img.width / image_scale), cv.Round(img.height / image_scale)), 8, 1)

    # convert color input image to grayscale
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

    # scale input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)

    cv.EqualizeHist(small_img, small_img)

    if (cascade):
        t = cv.GetTickCount()
        faces = cv.HaarDetectObjects(small_img, cascade,
                                     cv.CreateMemStorage(0), haar_scale,
                                     min_neighbors, haar_flags, min_size)
        t = cv.GetTickCount() - t
        print "detection time = %gms" % (t / (cv.GetTickFrequency() * 10000))
        if faces:
            for ((x, y, w, h), n) in faces:

                # the input to cv.HaarDetectObjects was resized, so scale the
                # bounding box of each face and convert it to two CvPoints
                pt1 = (int(x * image_scale), int(y * image_scale))

                pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
                cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)

                if jpg_cnt % 50 == 1:
                    print('capture completed')
                    cv.SaveImage('test_' + str(jpg_cnt) + '.jpg', img)
                    print("aaa1")
                    url = 'http://210.94.185.52:8080/upload.php'
                    #files={ 'upfiles' : open('/home/lee/test_'+str(jpg_cnt)+'.jpg','rb')}
                    files = {
                        'upfiles':
                        open('/home/lee/test_' + str(jpg_cnt) + '.jpg', 'rb')
                    }
                    print("aaa2")
                    r = requests.post(url, files=files)
                    print("aaa3")
                    print(r.text)
                    for i in r.text.split():
                        try:
                            op = float(i)
                            break
                        except:
                            continue
                    print(op)
                    #LED
                    if op >= 0.9:
                        lock_on()
                    else:
                        print('no')

    cv.ShowImage("result", img)
Example #12
def track(img, threshold=100):
    '''Accepts BGR image and optional object threshold between 0 and 255 (default = 100).
       Returns: (x,y) coordinates of centroid if found
                (-1,-1) if no centroid was found
                None if user hit ESC
    '''
    cascade = cv.Load("haarcascade_frontalface_default.xml")
    gray = cv.CreateImage((img.width, img.height), 8, 1)
    small_img = cv.CreateImage((cv.Round(
        img.width / image_scale), cv.Round(img.height / image_scale)), 8, 1)

    # convert color input image to grayscale
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

    # scale input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)

    cv.EqualizeHist(small_img, small_img)

    center = (-1, -1)
    faces = []
    original_size_faces = []
    #import ipdb; ipdb.set_trace()
    if (cascade):
        t = cv.GetTickCount()
        # HaarDetectObjects takes 0.02s
        faces = cv.HaarDetectObjects(small_img, cascade,
                                     cv.CreateMemStorage(0), haar_scale,
                                     min_neighbors, haar_flags, min_size)
        t = cv.GetTickCount() - t
        if faces:
            for ((x, y, w, h), n) in faces:
                # the input to cv.HaarDetectObjects was resized, so scale the
                # bounding box of each face and convert it to two CvPoints
                pt1 = (int(x * image_scale), int(y * image_scale))
                pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
                # cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)
                #cv.Rectangle(img, (x,y), (x+w,y+h), 255)
                # get the xy corner co-ords, calc the center location
                x1 = pt1[0]
                x2 = pt2[0]
                y1 = pt1[1]
                y2 = pt2[1]
                centerx = x1 + ((x2 - x1) / 2)
                centery = y1 + ((y2 - y1) / 2)
                center = (centerx, centery)

                scaled = ((x1, y1, x2 - x1, y2 - y1), n)
                original_size_faces.append(scaled)
                # print scaled


#    cv.NamedWindow(WINDOW_NAME, 1)
#    cv.ShowImage(WINDOW_NAME, img)

#    if cv.WaitKey(5) == 27:
#        center = None
    return (center, original_size_faces)
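A caller of track(), per its docstring, might look roughly like the following; the capture setup and the module-level tuning globals (image_scale, haar_scale, min_neighbors, haar_flags, min_size) are assumed to be defined elsewhere in the original module, and cv is the same legacy module the example imports:

# hypothetical usage sketch for track()
capture = cv.CaptureFromCAM(0)
while True:
    frame = cv.QueryFrame(capture)
    center, faces = track(frame)
    if center == (-1, -1):
        continue  # no face found in this frame
    print "face centroid at:", center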
Example #13
def histogramequalization():
    src = cv.LoadImage(getpath(), cv.CV_LOAD_IMAGE_GRAYSCALE)
    dst = cv.CreateImage((src.width, src.height), src.depth, src.channels)
    cv.EqualizeHist(src, dst)
    cv.NamedWindow("SourceImage", 1)
    cv.NamedWindow("EqualizedImage", 1)
    cv.ShowImage("SourceImage", src)
    cv.ShowImage("EqualizedImage", dst)
    cv.WaitKey(0)
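With the cv2 API the same equalization demo shrinks to a handful of calls; a minimal sketch in which a placeholder path replaces the getpath() helper:

import cv2

def histogramequalization_cv2(path="input.jpg"):  # path is a placeholder
    src = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
    dst = cv2.equalizeHist(src)
    cv2.imshow("SourceImage", src)
    cv2.imshow("EqualizedImage", dst)
    cv2.waitKey(0)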
Example #14
def DetectRedEyes(image, faceCascade, eyeCascade):
	min_size = (20,20)
	image_scale = 2
	haar_scale = 1.2
	min_neighbors = 2
	haar_flags = 0

	# Allocate the temporary images
	gray = cv.CreateImage((image.width, image.height), 8, 1)
	smallImage = cv.CreateImage((cv.Round(image.width / image_scale),cv.Round (image.height / image_scale)), 8 ,1)

	# Convert color input image to grayscale
	cv.CvtColor(image, gray, cv.CV_BGR2GRAY)

	# Scale input image for faster processing
	cv.Resize(gray, smallImage, cv.CV_INTER_LINEAR)

	# Equalize the histogram
	cv.EqualizeHist(smallImage, smallImage)

	# Detect the faces
	faces = cv.HaarDetectObjects(smallImage, faceCascade, cv.CreateMemStorage(0),
	haar_scale, min_neighbors, haar_flags, min_size)

	# If faces are found
	if faces:
		for ((x, y, w, h), n) in faces:
			# the input to cv.HaarDetectObjects was resized, so scale the
			# bounding box of each face and convert it to two CvPoints
			pt1 = (int(x * image_scale), int(y * image_scale))
			pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
			cv.Rectangle(image, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)
			face_region = cv.GetSubRect(image,(x,int(y + (h/4)),w,int(h/2)))

		cv.SetImageROI(image, (pt1[0],
			pt1[1],
			pt2[0] - pt1[0],
			int((pt2[1] - pt1[1]) * 0.7)))
		eyes = cv.HaarDetectObjects(image, eyeCascade,
		cv.CreateMemStorage(0),
		haar_scale, min_neighbors,
		haar_flags, (15,15))	

		if eyes:
			# For each eye found
			for eye in eyes:
				# Draw a rectangle around the eye
				cv.Rectangle(image,
				(eye[0][0],
				eye[0][1]),
				(eye[0][0] + eye[0][2],
				eye[0][1] + eye[0][3]),
				cv.RGB(255, 0, 0), 1, 8, 0)

	cv.ResetImageROI(image)
	return image
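The face-then-eyes cascade nesting in DetectRedEyes translates to NumPy slicing in cv2. A rough sketch, assuming face_cascade and eye_cascade are already-loaded cv2.CascadeClassifier objects:

import cv2

def detect_eyes_cv2(image, face_cascade, eye_cascade):
    gray = cv2.equalizeHist(cv2.cvtColor(image, cv2.COLOR_BGR2GRAY))
    for (x, y, w, h) in face_cascade.detectMultiScale(gray, 1.2, 2,
                                                      minSize=(20, 20)):
        cv2.rectangle(image, (x, y), (x + w, y + h), (0, 0, 255), 3)
        # search for eyes only in the upper 70% of the face, as above
        roi = gray[y:y + int(h * 0.7), x:x + w]
        for (ex, ey, ew, eh) in eye_cascade.detectMultiScale(roi, 1.2, 2,
                                                             minSize=(15, 15)):
            cv2.rectangle(image, (x + ex, y + ey),
                          (x + ex + ew, y + ey + eh), (0, 0, 255), 1)
    return image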
Example #15
def readWholeImg(imgname):
    curFrame = cv.LoadImage(imgname, 1)
    gray = cv.CreateImage((curFrame.width, curFrame.height), 8, 1)
    cv.CvtColor(curFrame, gray, cv.CV_BGR2GRAY)
    img48 = cv.CreateImage((48, 48), 8, 1)
    cv.Resize(gray, img48, cv.CV_INTER_LINEAR)
    cv.EqualizeHist(img48, img48)
    face_vector = np.asarray(img48[:, :])
    face_vector = face_vector.reshape(48 * 48)
    return [[face_vector], curFrame]
Example #16
    def OnPaint(self, evt):
        if not self.timer.IsRunning() :
            dc = wx.BufferedDC(wx.ClientDC(self), wx.NullBitmap, wx.BUFFER_VIRTUAL_AREA)
            dc.SetBackground(wx.Brush(wx.Colour(0, 0, 0)))
            return
            
        # Capture the image
        frame = cv.QueryFrame(CAMERA)
        cv.CvtColor(frame, frame, cv.CV_BGR2RGB)
        Img = wx.EmptyImage(frame.width, frame.height)
        Img.SetData(frame.tostring())
        self.bmp = wx.BitmapFromImage(Img)
        width, height = frame.width, frame.height
        
        # Face detection
        min_size = (20, 20)
        image_scale = 2
        haar_scale = 1.2
        min_neighbors = 2
        haar_flags = 0

        gray = cv.CreateImage((frame.width, frame.height), 8, 1)
        small_img = cv.CreateImage((cv.Round(frame.width / image_scale), cv.Round (frame.height / image_scale)), 8, 1)
        cv.CvtColor(frame, gray, cv.CV_BGR2GRAY)
        cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)
        cv.EqualizeHist(small_img, small_img)
        
        listeVisages = cv.HaarDetectObjects(small_img, CASCADE, cv.CreateMemStorage(0), haar_scale, min_neighbors, haar_flags, min_size)

        # Display the image
        x, y = (0, 0)
        try:
            dc = wx.BufferedDC(wx.ClientDC(self), wx.NullBitmap, wx.BUFFER_VIRTUAL_AREA)
            try :
                dc.SetBackground(wx.Brush(wx.Colour(0, 0, 0)))
            except :
                pass
            dc.Clear()
            dc.DrawBitmap(self.bmp, x, y)
            
            # Draw rectangles around the detected faces
            if listeVisages :
                for ((x, y, w, h), n) in listeVisages :
                    dc.SetBrush(wx.TRANSPARENT_BRUSH)
                    dc.SetPen(wx.Pen(wx.Colour(255, 0, 0), 2))
                    dc.DrawRectangle(x* image_scale, y* image_scale, w* image_scale, h* image_scale)
            
            self.listeVisages = listeVisages
            del dc
            del Img
            
        except TypeError:
            pass
        except wx.PyDeadObjectError:
            pass
Example #17
def detect_and_draw(img, cascade, detected):
    # allocate temporary images

    gray = cv.CreateImage((img.width, img.height), 8, 1)
    image_scale = img.width / smallwidth
    small_img = cv.CreateImage((cv.Round(
        img.width / image_scale), cv.Round(img.height / image_scale)), 8, 1)

    # convert color input image to grayscale
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

    # scale input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)

    cv.EqualizeHist(small_img, small_img)

    if (cascade):
        t = cv.GetTickCount()
        faces = cv.HaarDetectObjects(small_img, cascade,
                                     cv.CreateMemStorage(0), haar_scale,
                                     min_neighbors, haar_flags, min_size)
        # t = cv.GetTickCount() - t
        # print "detection time = %gms" % (t/(cv.GetTickFrequency()*1000.))
        if faces:
            if detected == 0:
                # os.system('festival --tts hi &')
                detected = 1
            for ((x, y, w, h), n) in faces:
                # the input to cv.HaarDetectObjects was resized, so scale the
                # bounding box of each face and convert it to two CvPoints
                pt1 = (int(x * image_scale), int(y * image_scale))
                pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
                cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)
                print "Face at: ", pt1[0], ",", pt2[0], "\t", pt1[1], ",", pt2[
                    1]
                # find amount needed to pan/tilt
                span = (pt1[0] + pt2[0]) / 2
                stlt = (pt1[1] + pt2[1]) / 2
                mid = smallwidth / 2
                if span < mid:
                    print "left", mid - span
                else:
                    print "right", span - mid

#os.system('echo "6="' + str(valTilt) + ' > /dev/pi-blaster')
#os.system('echo "7="' + str(valPan) + ' > /dev/pi-blaster')
        else:
            if detected == 1:
                #print "Last seen at: ", pt1[0], ",", pt2[0], "\t", pt1[1], ",", pt2[1]
                #os.system('festival --tts bye &')
                status = "just disappeared"
            detected = 0

    cv.ShowImage("result", img)
    return detected
Example #18
def detect_and_draw(img, cascade, c):
    # allocate temporary images
    gray = cv.CreateImage((img.width, img.height), 8, 1)
    small_img = cv.CreateImage((cv.Round(
        img.width / image_scale), cv.Round(img.height / image_scale)), 8, 1)

    # convert color input image to grayscale
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

    # scale input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)

    cv.EqualizeHist(small_img, small_img)

    face_flag = False

    if (cascade):
        t = cv.GetTickCount()
        faces = cv.HaarDetectObjects(small_img, cascade,
                                     cv.CreateMemStorage(0), haar_scale,
                                     min_neighbors, haar_flags, min_size)
        t = cv.GetTickCount() - t
        print "detection time = %gms" % (t / (cv.GetTickFrequency() * 1000.))
        if faces:
            face_flag = True
            for ((x, y, w, h), n) in faces:
                # the input to cv.HaarDetectObjects was resized, so scale the
                # bounding box of each face and convert it to two CvPoints
                pt1 = (int(x * image_scale), int(y * image_scale))
                pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))

                # once the face has been detected in enough consecutive frames
                if c > 4:
                    # save the full frame
                    global counter
                    counter = -1
                    d = datetime.today()
                    datestr = d.strftime('%Y-%m-%d_%H-%M-%S')
                    outputname = '/home/pi/fd/fd_' + datestr + '.jpg'
                    cv.SaveImage(outputname, img)
                    print 'Face Detect'

                    # reload the saved frame and crop out the face
                    fimg = cv.LoadImage(outputname)
                    fimg_trim = fimg[pt1[1]:pt2[1], pt1[0]:pt2[0]]
                    outputname2 = '/home/pi/fd/face_' + datestr + '.jpg'
                    cv.SaveImage(outputname2, fimg_trim)
                    print 'Face Image Save'

                cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)

    cv.ShowImage("result", img)

    return face_flag
Example #19
def detect_and_draw(img, cascade):
    # allocate temporary images
    gray = cv.CreateImage((img.width,img.height), 8, 1)
    small_img = cv.CreateImage((cv.Round(img.width / image_scale),
			       cv.Round (img.height / image_scale)), 8, 1)

    # convert color input image to grayscale
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

    # scale input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)

    cv.EqualizeHist(small_img, small_img)

    if(cascade):
        t = cv.GetTickCount()
        faces = cv.HaarDetectObjects(small_img, cascade, cv.CreateMemStorage(0),
                                     haar_scale, min_neighbors, haar_flags, min_size)
        t = cv.GetTickCount() - t
        #print "detection time = %gms" % (t/(cv.GetTickFrequency()*1000.))
        if faces:
            count = 0
            stop = 1
            name = 1
            no = 1
            dict = {}
            for num in range(14):
                dict[name] = no
                name += 1
            print dict
            f = open('no.json','w')
            json.dump(dict,f)
            #for count in range(14):
            #time.sleep(stop)
                #count += 1
                #print(count)
            #time.sleep(stop)
            #cv.PutText(img, "SAMPLE_TEXT", (0, 50), cv.CV_FONT_HERSHEY_PLAIN, cv.RGB(255, 255, 255))
            #cv.PutText(img, "SAMPLE_TEXT", (0, 50), cv.CV_FONT_HERSHEY_PLAIN, 4, (255, 255, 255), 2, cv.CV_AA )
            for ((x, y, w, h), n) in faces:
            # the input to cv.HaarDetectObjects was resized, so scale the 
            # bounding box of each face and convert it to two CvPoints
                #for count in range(14):
                count += 1
                print(count)
                pt1 = (int(x * image_scale), int(y * image_scale))
                pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
                cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)
                #count = count + 1
                #print(count)
                # cv.putText(img, "SAMPLE_TEXT", (0, 50), FONT_HERSHEY_PLAIN, 4, (255, 255, 255), 2, cv.CV_AA)


    cv.ShowImage("result", img)
Example #20
def dect_image(filename):
    img = cv.LoadImage(filename)
    gray = cv.CreateImage(cv.GetSize(img), 8, 1)
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)
    cv.EqualizeHist(gray, gray)
    rects = detect(img, cascade)
    if len(rects) != 0:
        rect = (rects[0][0], rects[0][1], rects[0][2] - rects[0][0],
                rects[0][3] - rects[0][1])
        cv.SetImageROI(img, rect)
    cv.SaveImage(filename, img)
Example #21
def DetectRedEyes(image, faceCascade):
    min_size = (20, 20)
    image_scale = 2
    haar_scale = 1.1
    min_neighbors = 2
    haar_flags = 0

    # Allocate the temporary images
    gray = cv.CreateImage((image.width, image.height), 8, 1)
    smallImage = cv.CreateImage((cv.Round(
        image.width / image_scale), cv.Round(image.height / image_scale)), 8,
                                1)

    # Convert color input image to grayscale
    cv.CvtColor(image, gray, cv.CV_BGR2GRAY)

    # Scale input image for faster processing
    cv.Resize(gray, smallImage, cv.CV_INTER_LINEAR)

    # Equalize the histogram
    cv.EqualizeHist(smallImage, smallImage)

    # Detect the faces
    faces = cv.HaarDetectObjects(smallImage, faceCascade,
                                 cv.CreateMemStorage(0), haar_scale,
                                 min_neighbors, haar_flags, min_size)

    # If faces are found
    if faces:

        #print faces

        for ((x, y, w, h), n) in faces:
            # the input to cv.HaarDetectObjects was resized, so scale the
            # bounding box of each face and convert it to two CvPoints
            #print "face"
            global line2
            line2 = n
            pt1 = (int(x * image_scale), int(y * image_scale))
            pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
            # print pt1
            # print pt2
            cv.Rectangle(image, pt1, pt2, cv.RGB(255, 0, 0), 1, 8, 0)
            cv.PutText(image, "face" + str(h), pt1, font, cv.RGB(255, 0, 0))
            cv.PutText(image, "Come close.", (0, 20), font, cv.RGB(255, 0, 0))
            cv.PutText(image, "Ensure your forehead is well lit.", (0, 40),
                       font, cv.RGB(255, 0, 0))
            cv.PutText(image, "Hit escape when done.", (0, 60), font,
                       cv.RGB(255, 0, 0))

    cv.ResetImageROI(image)
    return image
Example #22
def detect_and_draw(img, cascade):
    # allocate temporary images
    gray = cv.CreateImage((img.width,img.height), 8, 1)
    small_img = cv.CloneMat(img)  # was: cv.CreateImage((cv.Round(img.width / image_scale), cv.Round(img.height / image_scale)), 8, 1)

    # convert color input image to grayscale
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

    # scale input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)

    cv.EqualizeHist(small_img, small_img)

    if(cascade):
        t = cv.GetTickCount()
        #Scan image and get an array of faces
        faces = cv.HaarDetectObjects(small_img, cascade, cv.CreateMemStorage(0),
                                     haar_scale, min_neighbors, haar_flags, min_size)
        t = cv.GetTickCount() - t
        #print "detection time = %gms" % (t/(cv.GetTickFrequency()*1000.))
        if faces:
            for ((x, y, w, h), n) in faces:
                # the input to cv.HaarDetectObjects was resized, so scale the 
                # bounding box of each face and convert it to two CvPoints
                pt1 = (int(x * image_scale), int(y * image_scale))
                pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
                cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)
                print "X " , x 
                if int(x * image_scale) > (img.width * 0.45):
                    #print "X " , x
                    #print steppera.IsTurning()
                    if (steppera.IsTurning() == False):
                        if (stepperInUse[STEPPERA] == True):
                            sensor_value = "-4"
                            if isNumeric(sensor_value):
                                print "Moving to" , sensor_value
                                steppera.changeSpeed(int(100 * sign(int(float(sensor_value)) - 0)),abs(int(float(sensor_value)) - 0))
                                while (steppera.IsTurning() == True):
                                    cv.WaitKey(100)
                if int((x + w) * image_scale) < (img.width * 0.55):
                    #print "X " , x
                    #print steppera.IsTurning()
                    if (steppera.IsTurning() == False):
                        if (stepperInUse[STEPPERA] == True):
                            sensor_value = "4"
                            if isNumeric(sensor_value):
                                print "Moving to" , sensor_value
                                steppera.changeSpeed(int(100 * sign(int(float(sensor_value)) - 0)),abs(int(float(sensor_value)) - 0))
                                while (steppera.IsTurning() == True):
                                    cv.WaitKey(100)

    cv.ShowImage("result", img)
Example #23
File: c.py Project: lujinda/pylot
def ifFace(img, size):
    gray = cv.CreateImage(size, 8, 1)
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)
    newMem = cv.CreateMemStorage(0)
    cv.EqualizeHist(gray, gray)
    face = cv.HaarDetectObjects(gray, c_f, newMem, 1.2, 3,
                                cv.CV_HAAR_DO_CANNY_PRUNING, (50, 50))
    mouth = cv.HaarDetectObjects(gray, c_m, newMem, 1.2, 2,
                                 cv.CV_HAAR_DO_CANNY_PRUNING, (0, 0))
    if face and mouth:
        print "有脸"
        cv.SaveImage("img/out.jpg", img)
        sys.exit(0)
Example #24
def detect_and_draw(img, front_cascade, profile_cascade):
    # allocate temporary images
    gray = cv.CreateImage((img.width,img.height), 8, 1)
    small_img = cv.CreateImage((cv.Round(img.width / image_scale),
                   cv.Round (img.height / image_scale)), 8, 1)

    # convert color input image to grayscale
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

    # scale input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)

    cv.EqualizeHist(small_img, small_img)

    if(front_cascade):
        # Test for frontal face
        faces = cv.HaarDetectObjects(small_img, front_cascade, cv.CreateMemStorage(0),
                                     haar_scale, min_neighbors, haar_flags, min_size)
        if faces: # we've detected a face
            return [faces, FRONTAL]

        # Test for profile face
        faces = cv.HaarDetectObjects(small_img, profile_cascade, cv.CreateMemStorage(0),
                                     haar_scale, min_neighbors, haar_flags, min_size)
        if faces: # we've detected a face
            return [faces, PROFILE]

        #t = cv.GetTickCount() - t
        #print "detection time = %gms" % (t/(cv.GetTickFrequency()*1000.))
        #if faces:
            #for ((x, y, w, h), n) in faces:
                ## the input to cv.HaarDetectObjects was resized, so scale the
                ## bounding box of each face and convert it to two CvPoints
                #pt1 = (int(x * image_scale), int(y * image_scale))
                #pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))

                #imgWidth, imgHeight = cv.GetSize(img)
                #croppedX = max(0, x*image_scale-w*image_scale/2) 
                #croppedY = max(0, y*image_scale-h*image_scale/2)
                #croppedW = min(imgWidth, (2*w)*image_scale)
                #croppedH = min(imgHeight, (2*h)*image_scale)

                #imgCropped = cv.CreateImage((croppedW, croppedH), img.depth, img.nChannels)
                #srcRegion = cv.GetSubRect(img, (croppedX, croppedY, croppedW, croppedH))
                #cv.Copy(srcRegion, imgCropped)
                #cv.ShowImage("cropped", imgCropped)

                #cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)

    return []
Example #25
def detect_face(image, faceCascade):
    #variables
    min_size = (20, 20)
    haar_scale = 1.1
    min_neighbors = 3
    haar_flags = 0

    # Equalize the histogram
    cv.EqualizeHist(image, image)

    # Detect the faces
    faces = cv.HaarDetectObjects(image, faceCascade, cv.CreateMemStorage(0),
                                 haar_scale, min_neighbors, haar_flags,
                                 min_size)
    return faces
Example #26
File: youku.py Project: lujinda/pylot
 def ifFace(img,size):
     gray=cv.CreateImage(size,8,1)
     cv.CvtColor(img,gray,cv.CV_BGR2GRAY)
     newMem1=cv.CreateMemStorage(0)
     newMem2=cv.CreateMemStorage(0)
     newMem3=cv.CreateMemStorage(0)
     cv.EqualizeHist(gray,gray)
     face=cv.HaarDetectObjects(gray,c_f,newMem1,1.2,3,cv.CV_HAAR_DO_CANNY_PRUNING,(50,50))
     mouth=cv.HaarDetectObjects(gray,c_m,newMem2,1.2,2,cv.CV_HAAR_DO_CANNY_PRUNING,(10,10))
     body=cv.HaarDetectObjects(gray,c_m,newMem3,1.2,2,cv.CV_HAAR_DO_CANNY_PRUNING,(100,100))
     if face and mouth or body:
         cv.SaveImage("img/out.jpg",img)
         return 1
     else:
         return 0
Example #27
def detect_and_draw(img, cascade):
    # allocate temporary images
    gray = cv.CreateImage((img.width, img.height), 8, 1)
    small_img = cv.CreateImage((cv.Round(
        img.width / image_scale), cv.Round(img.height / image_scale)), 8, 1)

    # convert color input image to grayscale
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

    # scale input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)

    cv.EqualizeHist(small_img, small_img)

    if (cascade):
        t = cv.GetTickCount()
        faces = cv.HaarDetectObjects(small_img, cascade,
                                     cv.CreateMemStorage(0), haar_scale,
                                     min_neighbors, haar_flags, min_size)
        t = cv.GetTickCount() - t
        print "detection time = %gms" % (t / (cv.GetTickFrequency() * 1000.))
        if faces:
            facenum = 0
            for ((x, y, w, h), n) in faces:
                # the input to cv.HaarDetectObjects was resized, so scale the
                # bounding box of each face and convert it to two CvPoints
                pt1 = (int(x * image_scale), int(y * image_scale))
                pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
                cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)

                #code copied from https://github.com/mitchtech/py_servo_facetracker/blob/master/facetracker_servo_gpio.py

                x1 = pt1[0]
                x2 = pt2[0]
                y1 = pt1[1]
                y2 = pt2[1]
                midFaceX = x1 + ((x2 - x1) / 2)
                midFaceY = y1 + ((y2 - y1) / 2)
                facenum = facenum + 1
                client.publish(topic + str(facenum),
                               str(midFaceX) + "," + str(midFaceY), 0)

                print topic + str(facenum), str(midFaceX) + "," + str(midFaceY)

    cv.ShowImage("result", img)
Example #28
    def get_faces(self, image):
        """
		Given an opencv image, return a ((x,y,w,h), certainty) tuple for each face
		detected.
		"""

        # Convert the image to grayscale and normalise
        cv.CvtColor(image, self.gray, cv.CV_BGR2GRAY)
        cv.EqualizeHist(self.gray, self.gray)

        # Detect faces
        return cv.HaarDetectObjects(self.gray,
                                    self.cascade,
                                    self.storage,
                                    scale_factor=1.3,
                                    min_neighbors=2,
                                    flags=cv.CV_HAAR_DO_CANNY_PRUNING,
                                    min_size=(40, 40))
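A hypothetical caller of get_faces(), consuming the ((x, y, w, h), certainty) tuples described in the docstring; here detector stands for an instance of the surrounding class with self.gray, self.cascade and self.storage already allocated, and capture is assumed to exist:

# hypothetical usage sketch for get_faces()
frame = cv.QueryFrame(capture)
for ((x, y, w, h), certainty) in detector.get_faces(frame):
    print "face at (%d, %d), size %dx%d, neighbours %d" % (x, y, w, h, certainty)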
Example #29
def detect_and_draw(img,cascade):
    gray=cv.CreateImage((img.width,img.height),8,1)
    small_img=cv.CreateImage((cv.Round(img.width/image_scale),cv.Round(img.height/image_scale)),8,1)
    cv.CvtColor(img,gray,cv.CV_BGR2GRAY)
    cv.Resize(gray,small_img,cv.CV_INTER_LINEAR)
    cv.EqualizeHist(small_img,small_img)

    if(cascade):
        t=cv.GetTickCount()
        faces=cv.HaarDetectObjects(small_img,cascade,cv.CreateMemStorage(0),haar_scale,min_neighbors,haar_flags,min_size)
        t=cv.GetTickCount()-t
        print "time taken for detection = %gms"%(t/(cv.GetTickFrequency()*1000.))
        if faces:
            for ((x,y,w,h),n) in faces:
                pt1=(int(x*image_scale),int(y*image_scale))
                pt2=(int((x+w)*image_scale),int((y+h)*image_scale))
                cv.Rectangle(img,pt1,pt2,cv.RGB(255,0,0),3,8,0)

        cv.ShowImage("video",img)
Example #30
def repeat():  
      
    # grab one frame from the camera on every call
    frame = cv.QueryFrame(capture)

    image_size = cv.GetSize(frame)  # get the size of the frame
    #print image_size

    greyscale = cv.CreateImage(image_size, 8, 1)  # create a grayscale image of the same size

    cv.CvtColor(frame, greyscale, cv.CV_BGR2GRAY)  # convert the captured color frame to grayscale

    storage = cv.CreateMemStorage(0)  # create the memory storage that face detection needs

    cv.EqualizeHist(greyscale, greyscale)  # equalize the grayscale histogram, which seems to speed up detection

    # draw grid lines on the frame

    cv.Line(frame, (210,0),(210,480), (0,255,255),1)
    cv.Line(frame, (420,0),(420,480), (0,255,255),1)
    cv.Line(frame, (0,160),(640,160), (0,255,255),1)
    cv.Line(frame, (0,320),(640,320), (0,255,255),1)

    # detect objects: load the Intel-trained cascade
    cascade = cv.Load('/usr/share/OpenCV/haarcascades/haarcascade_frontalface_alt2.xml')

    # detect the faces in the frame and return them as the object "faces"
    faces = cv.HaarDetectObjects(greyscale, cascade, storage, 1.2, 2,
                                 cv.CV_HAAR_DO_CANNY_PRUNING,
                                 (100, 100))

    # use the face location data
    for (x,y,w,h) , n in faces:
       # print x,y
        if x<210:
            print "right"
        elif x>310:
            print "left"
        cv.Rectangle(frame, (x,y), (x+w,y+h), (0,128,0),2)  # mark the face with a rectangle

        cv.ShowImage("W1", greyscale)  # show the grayscale image
          
    cv.ShowImage("W1", frame)