Code example #1
    def found_face(self):
        # global frame_copy
        if (not self.camera_is_on()) or (not self.find_face_is_on()):
            return False

        self.flushCameraBuffer()  # this reduces the frame delay
        frame = cv.QueryFrame(self.capture)
        if frame is None:
            self.close_camera()
            return False

        # allocate the working copy once, matching the camera frame format
        if not self.frame_copy:
            self.frame_copy = cv.CreateImage((frame.width, frame.height),
                                             cv.IPL_DEPTH_8U, frame.nChannels)

        if frame.origin == cv.IPL_ORIGIN_TL:
            cv.Copy(frame, self.frame_copy)
        else:
            cv.Flip(frame, self.frame_copy, 0)

        if self.showVideo:
            result = self.detect_and_draw(self.frame_copy)
        else:
            result = self.detect_no_draw(self.frame_copy)
        cv.WaitKey(10)
        return result
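
Note that found_face() is written against the legacy cv (OpenCV 1.x) bindings. As a rough sketch, the same flow collapses on the modern cv2 API, since VideoCapture.read() returns top-left-origin numpy arrays; the helper methods here are assumed to match the class above.

import cv2

    def found_face(self):
        # hedged sketch: assumes the same helper methods as the legacy version
        if not (self.camera_is_on() and self.find_face_is_on()):
            return False
        ok, frame = self.capture.read()  # self.capture is a cv2.VideoCapture
        if not ok:
            self.close_camera()
            return False
        # no origin check or copy buffer needed: frames arrive as
        # top-left-origin numpy arrays
        if self.showVideo:
            result = self.detect_and_draw(frame)
        else:
            result = self.detect_no_draw(frame)
        cv2.waitKey(10)
        return result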
Code example #2
def _build_image(self, frame):
    # allocate the buffer once, then normalize the frame orientation;
    # these are legacy cv calls, so they must use the cv module, not cv2
    if not self._frame:
        self._frame = cv.CreateImage((frame.width, frame.height),
                                     cv.IPL_DEPTH_8U, frame.nChannels)
    if frame.origin == cv.IPL_ORIGIN_TL:
        cv.Copy(frame, self._frame)
    else:
        cv.Flip(frame, self._frame, 0)
    return IplQImage(self._frame)
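
On the modern API the IplQImage wrapper becomes unnecessary: a numpy frame can be wrapped directly in a QImage. A minimal sketch, assuming PyQt5 as the Qt binding:

import cv2
from PyQt5.QtGui import QImage

def _build_image(self, frame):
    # convert BGR (OpenCV's order) to RGB, then wrap the raw buffer;
    # .copy() makes the QImage own its data instead of borrowing numpy's
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    height, width, channels = rgb.shape
    return QImage(rgb.data, width, height, channels * width,
                  QImage.Format_RGB888).copy()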
Code example #3
def detect_and_draw(img, cascade):
    t = cv.GetTickCount()  # start the FPS timer
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)

    # Age all tracked faces, then drop the expired ones. Iterate over a
    # copy when removing: deleting from a list while iterating skips items.
    for f in trackedFaces:
        f.updateLife()
    for f in trackedFaces[:]:
        if f.isTooOld():
            trackedFaces.remove(f)

    faces = cv.HaarDetectObjects(small_img, cascade, storage, haar_scale,
                                 min_neighbors, haar_flags, min_size)
    drawline = 0
    if faces:
        #found a face
        for ((x, y, w, h), n) in faces:
            matchedFace = False
            pt1 = (int(x * image_scale), int(y * image_scale))
            pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
            # forehead box, only used by the commented-out Rectangle below
            pt3 = (pt1[0] + int((pt2[0] - pt1[0]) / 3), pt1[1])
            pt4 = (pt2[0] - int((pt2[0] - pt1[0]) / 3),
                   pt1[1] + int((pt2[1] - pt1[1]) / 3))

            #check if there are trackedFaces
            if trackedFaces:
                #each face being tracked
                for f in trackedFaces:
                    #the face is found (small movement)
                    if ((abs(f.xpt - pt1[0]) < FACE_MAX_MOVEMENT)
                            and (abs(f.ypt - pt1[1]) < FACE_MAX_MOVEMENT)):
                        matchedFace = True
                        f.updateFace(int(w * image_scale),
                                     int(h * image_scale), pt1[0], pt1[1])
                        mf = f
                        break

                #if face not found, add a new face
                if not matchedFace:
                    f = Face(0, int(w * image_scale), int(h * image_scale),
                             pt1[0], pt1[1], 0)
                    trackedFaces.append(f)
                    mf = f
            #No tracked faces: adding one
            else:
                f = Face(0, int(w * image_scale), int(h * image_scale), pt1[0],
                         pt1[1], 0)
                trackedFaces.append(f)
                mf = f
            #where to draw face and properties
            if (mf.age > 5):

                #draw attention line
                lnpt1 = (int(mf.xpt * scale), int(mf.ypt * scale - 5) - 5)
                if (mf.age > mf.width):
                    lnpt2 = (int(mf.xpt * scale + mf.width),
                             int(mf.ypt * scale - 5))
                else:
                    lnpt2 = (int(mf.xpt * scale + mf.age),
                             int(mf.ypt * scale - 5))

                cv.Rectangle(img, lnpt1, lnpt2, RED, 4, 8,
                             0)  # draw the bold attention line

                # draw eyes
                cv.Rectangle(img, mf.eyeLeft1, mf.eyeLeft2, MAGENTA, 3, 8, 0)
                cv.Rectangle(img, mf.eyeRight1, mf.eyeRight2, MAGENTA, 3, 8,
                             0)

                # draw mouth
                cv.Rectangle(img, mf.mouthTopLeft, mf.mouthBotRight, ORANGE,
                             3, 8, 0)

                # draw face
                cv.Rectangle(img, pt1, pt2, getColor(mf), 3, 8, 0)
                #cv.Rectangle(img, pt3, pt4, MAGENTA, 1, 8, 0)  # forehead
                drawline = mf.age

    if CAPTURING: saveAsJPG(img)
    if osName == "nt": cv.Flip(img, img, 0)
    cv.ShowImage('Camera', img)
    t = cv.GetTickCount() - t  # elapsed ticks for the FPS counter
    print("%i fps." % (cv.GetTickFrequency() * 1000000. / t))
Code example #4
    gray = cv.CreateImage(frame_size, 8, 1)
    small_img = cv.CreateImage(
        (int(frame_size[0] / image_scale), int(frame_size[1] / image_scale)),
        8, 1)
    cascade = cv.Load(cascade_name)

    while 1:  # loop until the stream ends or ESC is pressed
        # capture the current image
        frame = cv.QueryFrame(capture)
        if frame is None:
            # no image captured... end the processing
            break

        # normalize orientation per OS
        if osName == "nt":
            cv.Flip(frame, frame, 0)
        else:
            cv.Flip(frame, None, 1)

        # detect faces and draw the overlays
        detect_and_draw(frame, cascade)

        # handle key events
        k = cv.WaitKey(5)
        if k % 0x100 == 27:
            # the user pressed ESC, so exit
            cv.DestroyWindow('Camera')
            break

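The same capture loop on the modern cv2 API, as a self-contained sketch; the bundled cascade file, window name, and ESC handling mirror the legacy code above:

import cv2

cascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
cap = cv2.VideoCapture(0)
while True:
    ok, frame = cap.read()
    if not ok:
        break  # no image captured... end the processing
    frame = cv2.flip(frame, 1)  # mirror, like the non-Windows branch above
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    for (x, y, w, h) in cascade.detectMultiScale(gray, 1.2, 2):
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
    cv2.imshow('Camera', frame)
    if cv2.waitKey(5) & 0xFF == 27:  # ESC
        break
cap.release()
cv2.destroyAllWindows()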
Code example #5
                topmost = i
                temp = 2
    return (leftmost, rightmost, topmost, bottommost)


import win32api  # needed for SetCursorPos below

capture = cv.CaptureFromCAM(0)  # legacy cv has no VideoCapture class
cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH, 1280)
cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT, 720)
frame = cv.QueryFrame(capture)
test = cv.CreateImage(cv.GetSize(frame), 8, 3)
cv.NamedWindow("output")
posx = 0  # last tracked position; the loop below reads these into lastx/lasty
posy = 0
while (1):
    frame = cv.QueryFrame(capture)
    cv.Flip(frame, frame, 1)
    # we make all drawings on imdraw.
    imdraw = cv.CreateImage(cv.GetSize(frame), 8, 3)
    # we get coordinates from imgyellowthresh
    imgyellowthresh = getthresholdedimg(frame)
    # eroding removes small noises
    cv.Erode(imgyellowthresh, imgyellowthresh, None, 1)
    (leftmost, rightmost, topmost, bottommost) = getpositions(imgyellowthresh)
    if (leftmost - rightmost != 0) or (topmost - bottommost != 0):
        lastx = posx
        lasty = posy
        posx = cv.Round((rightmost + leftmost) / 2)
        posy = cv.Round((bottommost + topmost) / 2)
        if lastx != 0 and lasty != 0:
            win32api.SetCursorPos((posx, posy))
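
getthresholdedimg() is defined outside this snippet. As a guess at its intent on the modern API, a yellow threshold in HSV space might look like the following; the colour bounds are illustrative assumptions:

import cv2
import numpy as np

def getthresholdedimg_cv2(frame):
    # isolate yellow pixels; the HSV bounds here are illustrative guesses
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, np.array([20, 100, 100]),
                       np.array([30, 255, 255]))
    # eroding removes small noise, like the cv.Erode step above
    return cv2.erode(mask, None, iterations=1)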
Code example #6
if capture:
    frame_copy = None

while True:
    # capture frame-by-frame with the legacy API used by the rest of
    # this snippet (capture.read() would return a numpy array, which
    # has no .width/.nChannels/.origin attributes)
    frame = cv.QueryFrame(capture)
    if not frame:
        cv.WaitKey(0)
        break
    # allocate the copy buffer once, matching the frame format
    if not frame_copy:
        frame_copy = cv.CreateImage((frame.width, frame.height),
                                    cv.IPL_DEPTH_8U, frame.nChannels)
    # normalize to top-left origin, flipping in place when needed
    if frame.origin != cv.IPL_ORIGIN_TL:
        cv.Flip(frame, frame, 0)

    # Our operations on the frame come here
    gray = cv.CreateImage((frame.width, frame.height), 8, 1)
    small_img = cv.CreateImage((cv.Round(frame.width / image_scale),
                                cv.Round(frame.height / image_scale)), 8, 1)

    # convert color input image to grayscale
    cv.CvtColor(frame, gray, cv.CV_BGR2GRAY)

    # scale input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)

    cv.EqualizeHist(small_img, small_img)
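
The grayscale / resize / equalize pipeline has direct cv2 equivalents that return new arrays instead of writing into preallocated images. A minimal sketch:

import cv2

image_scale = 2
cap = cv2.VideoCapture(0)
ok, frame = cap.read()
if ok:
    # convert to grayscale, shrink for faster processing, then equalize
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    small_img = cv2.resize(gray, None, fx=1.0 / image_scale,
                           fy=1.0 / image_scale,
                           interpolation=cv2.INTER_LINEAR)
    small_img = cv2.equalizeHist(small_img)
cap.release()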
Code example #7
import argparse

import cv2
import imutils
# assumption: FaceDetector is the caller's Haar-cascade wrapper class;
# adjust the import to wherever that class actually lives
from facedetector import FaceDetector

ap = argparse.ArgumentParser()
ap.add_argument("-f", "--face", required=True,
                help="path to a face cascade classifier")
ap.add_argument("-v", "--video", help="path to a video file")
args = vars(ap.parse_args())

fd = FaceDetector(args["face"])

# fall back to the webcam when no video file is given
if not args.get("video", False):
    camera = cv2.VideoCapture(0)
else:
    camera = cv2.VideoCapture(args["video"])

while True:
    (grabbed, frame) = camera.read()
    if args.get("video") and not grabbed:
        break
    frame = imutils.resize(frame, width=300)
    # flip the frame itself (cv2 has no Flip; cv2.flip takes a positional
    # flip code), so the detected boxes line up with the image we draw on
    frame = cv2.flip(frame, -1)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    faceRects = fd.detect(gray, scaleFactor=1.1, minNeighbors=3,
                          minSize=(30, 30))
    frameClone = frame.copy()
    for (fX, fY, fW, fH) in faceRects:
        cv2.rectangle(frameClone, (fX, fY), (fX + fW, fY + fH),
                      (0, 255, 0), 2)
    cv2.imshow("Face", frameClone)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break


camera.release()
cv2.destroyAllWindows()
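
The flip fix above matters because cv2 only provides cv2.flip(src, flipCode), with a positional flip code. For reference:

import numpy as np
import cv2

img = np.arange(6, dtype=np.uint8).reshape(2, 3)
up_down = cv2.flip(img, 0)     # flip around the x-axis (vertical flip)
left_right = cv2.flip(img, 1)  # flip around the y-axis (horizontal mirror)
both = cv2.flip(img, -1)       # flip both axes: a 180-degree rotation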