Example #1
def precornerdetect(image):
    # assume that the image is floating-point 
    corners = cv.CloneMat(image)
    cv.PreCornerDetect(image, corners, 3)

    dilated_corners = cv.CloneMat(image)
    cv.Dilate(corners, dilated_corners, None, 1)

    corner_mask = cv.CreateMat(image.rows, image.cols, cv.CV_8UC1)
    cv.Sub(corners, dilated_corners, corners)
    cv.CmpS(corners, 0, corner_mask, cv.CV_CMP_GE)
    return (corners, corner_mask)
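
A note on Example #1: the same pre-corner-detection pipeline translates almost line for line to the modern cv2/NumPy API. The sketch below is illustrative, not part of the example; the function name precornerdetect_cv2 is an assumption.

import cv2
import numpy as np

def precornerdetect_cv2(image):
    # image: single-channel float32 array
    corners = cv2.preCornerDetect(image, ksize=3)
    dilated = cv2.dilate(corners, None)  # default 3x3 kernel, one iteration
    # a pixel is a local maximum exactly where it survives the dilation
    corner_mask = (corners >= dilated).astype(np.uint8) * 255
    return corners, corner_mask
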
Example #2
    def run(self):
        hist = cv.CreateHist([180], cv.CV_HIST_ARRAY, [(0, 180)], 1)
        backproject_mode = True

        while True:
            frame = cv.QueryFrame(self.capture)

            # Convert to HSV and keep the hue
            hsv = cv.CreateImage(cv.GetSize(frame), 8, 3)
            cv.CvtColor(frame, hsv, cv.CV_BGR2HSV)
            self.hue = cv.CreateImage(cv.GetSize(frame), 8, 1)
            cv.Split(hsv, self.hue, None, None, None)

            # Compute back projection
            backproject = cv.CreateImage(cv.GetSize(frame), 8, 1)
            cv.CalcArrBackProject([self.hue], backproject, hist)

            # Run the cam-shift (if a tracking window is set and non-zero)
            if self.track_window and is_rect_nonzero(self.track_window):
                crit = (cv.CV_TERMCRIT_EPS | cv.CV_TERMCRIT_ITER, 10, 1)
                (iters, (area, value, rect),
                 track_box) = cv.CamShift(backproject, self.track_window,
                                          crit)  #Call the camshift !!
                self.track_window = rect  #Put the current rectangle as the tracked area

            # If mouse is pressed, highlight the current selected rectangle and recompute histogram
            if self.drag_start and is_rect_nonzero(self.selection):
                sub = cv.GetSubRect(frame, self.selection)  #Get specified area

                #Dim the frame to give a background-shadow effect while selecting
                save = cv.CloneMat(sub)
                cv.ConvertScale(frame, frame, 0.5)
                cv.Copy(save, sub)

                #Draw temporary rectangle
                x, y, w, h = self.selection
                cv.Rectangle(frame, (x, y), (x + w, y + h), (255, 255, 255))

                #Take the same area but in hue image to calculate histogram
                sel = cv.GetSubRect(self.hue, self.selection)
                cv.CalcArrHist([sel], hist, 0)

                #Used to rescale the histogram with the max value (to draw it later on)
                (_, max_val, _, _) = cv.GetMinMaxHistValue(hist)
                if max_val != 0:
                    cv.ConvertScale(hist.bins, hist.bins, 255. / max_val)

            elif self.track_window and is_rect_nonzero(
                    self.track_window):  # If a window is set, draw an EllipseBox
                cv.EllipseBox(frame, track_box, cv.CV_RGB(255, 0, 0), 3,
                              cv.CV_AA, 0)

            cv.ShowImage("CamShiftDemo", frame)
            cv.ShowImage("Backprojection", backproject)
            cv.ShowImage("Histogram", self.hue_histogram_as_image(hist))

            c = cv.WaitKey(7) % 0x100
            if c == 27:
                break
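
The hue-histogram / back-projection / CamShift loop of Example #2 maps directly onto the cv2 API. A minimal self-contained sketch follows; the initial track_window seed and the window names are assumptions, and the seed ROI is assumed to actually contain the object:

import cv2
import numpy as np

cap = cv2.VideoCapture(0)
term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)
track_window = (200, 150, 100, 100)  # assumed seed ROI (x, y, w, h)

# build a hue histogram from the seed ROI of the first frame
ret, frame = cap.read()
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
x, y, w, h = track_window
roi_hist = cv2.calcHist([hsv[y:y + h, x:x + w]], [0], None, [180], [0, 180])
cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)

while True:
    ret, frame = cap.read()
    if not ret:
        break
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    backproject = cv2.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)
    track_box, track_window = cv2.CamShift(backproject, track_window, term_crit)
    cv2.ellipse(frame, track_box, (0, 0, 255), 2)
    cv2.imshow('CamShiftDemo', frame)
    cv2.imshow('Backprojection', backproject)
    if cv2.waitKey(7) & 0xFF == 27:
        break
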
Example #3
    def run(self):
        hist = cv.CreateHist([180], cv.CV_HIST_ARRAY, [(0, 180)], 1)
        backproject_mode = False
        while True:
            frame = self.capture  # a still image here; use cv.QueryFrame(self.capture) for live video

            # Convert to HSV and keep the hue
            hsv = cv.CreateImage(cv.GetSize(frame), 8, 3)
            cv.CvtColor(frame, hsv, cv.CV_BGR2HSV)
            self.hue = cv.CreateImage(cv.GetSize(frame), 8, 1)
            cv.Split(hsv, self.hue, None, None, None)

            # Compute back projection
            backproject = cv.CreateImage(cv.GetSize(frame), 8, 1)

            # Run the cam-shift
            cv.CalcArrBackProject([self.hue], backproject, hist)
            #             if self.track_window and is_rect_nonzero(self.track_window):
            #                 crit = ( cv.CV_TERMCRIT_EPS | cv.CV_TERMCRIT_ITER, 10, 1)
            #                 (iters, (area, value, rect), track_box) = cv.CamShift(backproject, self.track_window, crit)
            #                 self.track_window = rect

            # If mouse is pressed, highlight the current selected rectangle
            # and recompute the histogram

            if self.drag_start and is_rect_nonzero(self.selection):
                sub = cv.GetSubRect(frame, self.selection)
                save = cv.CloneMat(sub)
                #cv.ConvertScale(frame, frame, 0.5)
                cv.Copy(save, sub)
                x, y, w, h = self.selection
                cv.Rectangle(frame, (x, y), (x + w, y + h), (255, 255, 255))

                sel = cv.GetSubRect(self.hue, self.selection)
                cv.CalcArrHist([sel], hist, 0)
                (_, max_val, _, _) = cv.GetMinMaxHistValue(hist)
                if max_val != 0:
                    cv.ConvertScale(hist.bins, hist.bins, 255. / max_val)


#             elif self.track_window and is_rect_nonzero(self.track_window):
#                 cv.EllipseBox( frame, track_box, cv.CV_RGB(255,0,0), 3, cv.CV_AA, 0 )

            if not backproject_mode:
                cv.ShowImage("SelectROI", frame)
            else:
                cv.ShowImage("SelectROI", backproject)
            cv.ShowImage("Histogram", self.hue_histogram_as_image(hist))

            c = cv.WaitKey(7) % 0x100
            if c == 27:
                f = open('newtree.yaml', "w")
                yaml.dump(self.selection, f)
                f.close()
                break
            elif c == ord("b"):
                backproject_mode = not backproject_mode
Example #4
    def remap(self, src):
        """
        :param src: source image
        :type src: :class:`cvMat`

        Apply the post-calibration undistortion to the source image
        """
        r = cv.CloneMat(src)
        cv.Remap(src, r, self.mapx, self.mapy)
        return r
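
With cv2, the equivalent of this remap method precomputes the undistortion maps once and reuses them per frame. A sketch; K and D below are placeholder intrinsics standing in for the calibration data behind self.mapx/self.mapy:

import cv2
import numpy as np

# placeholder intrinsics; in practice K and D come from cv2.calibrateCamera
K = np.array([[500.0, 0.0, 320.0], [0.0, 500.0, 240.0], [0.0, 0.0, 1.0]])
D = np.array([-0.2, 0.05, 0.0, 0.0])  # k1, k2, p1, p2

# precompute the undistortion maps once, here for 640x480 input
map1, map2 = cv2.initUndistortRectifyMap(K, D, None, K, (640, 480),
                                         cv2.CV_16SC2)

def remap(src):
    # apply the post-calibration undistortion to one image
    return cv2.remap(src, map1, map2, interpolation=cv2.INTER_LINEAR)
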
Example #5
def detect_and_draw(img, cascade):
    # allocate temporary images
    gray = cv.CreateImage((img.width, img.height), 8, 1)
    # this variant skips the usual downscaling, so clone the grayscale buffer
    # at full size; small_img must be single-channel for the calls below
    small_img = cv.CloneMat(cv.GetMat(gray))

    # convert color input image to grayscale
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

    # "scale" the input image (a straight copy here, since the sizes match)
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)

    cv.EqualizeHist(small_img, small_img)

    if(cascade):
        t = cv.GetTickCount()
        #Scan image and get an array of faces
        faces = cv.HaarDetectObjects(small_img, cascade, cv.CreateMemStorage(0),
                                     haar_scale, min_neighbors, haar_flags, min_size)
        t = cv.GetTickCount() - t
        #print "detection time = %gms" % (t/(cv.GetTickFrequency()*1000.))
        if faces:
            for ((x, y, w, h), n) in faces:
                # the input to cv.HaarDetectObjects was resized, so scale the 
                # bounding box of each face and convert it to two CvPoints
                pt1 = (int(x * image_scale), int(y * image_scale))
                pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
                cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)
                print "X " , x 
                if int(x * image_scale) > (img.width * 0.45):
                    if not steppera.IsTurning():
                        if stepperInUse[STEPPERA]:
                            sensor_value = "-4"
                            if isNumeric(sensor_value):
                                print "Moving to", sensor_value
                                steppera.changeSpeed(
                                    int(100 * sign(int(float(sensor_value)))),
                                    abs(int(float(sensor_value))))
                                while steppera.IsTurning():
                                    cv.WaitKey(100)
                if int((x + w) * image_scale) < (img.width * 0.55):
                    if not steppera.IsTurning():
                        if stepperInUse[STEPPERA]:
                            sensor_value = "4"
                            if isNumeric(sensor_value):
                                print "Moving to", sensor_value
                                steppera.changeSpeed(
                                    int(100 * sign(int(float(sensor_value)))),
                                    abs(int(float(sensor_value))))
                                while steppera.IsTurning():
                                    cv.WaitKey(100)

    cv.ShowImage("result", img)
Example #6
    def __init__(self, src_image):
        self.src_image = src_image
        self.dst_image = cv.CloneMat(src_image)
        self.hist_image = cv.CreateImage((320, 200), 8, 1)
        self.hist = cv.CreateHist([hist_size], cv.CV_HIST_ARRAY, ranges, 1)
        self.brightness = 0
        self.contrast = 0
        cv.NamedWindow("image", 0)
        cv.NamedWindow("histogram", 0)
        cv.CreateTrackbar("brightness", "image", 100, 200,
                          self.update_brightness)
        cv.CreateTrackbar("contrast", "image", 100, 200, self.update_contrast)
        self.update_brightcont()
Example #7
def extractEyeBrows(originalImage, pt1, centerX, centerY, eyeBallParams):

    (eyeBallCenterX, eyeBallCenterY, eyeBallRadius) = eyeBallParams
    # find good features
    #                 eig_image = cv.CreateMat(gray_im.rows, gray_im.cols, cv.CV_32FC1)
    #                 temp_image = cv.CreateMat(gray_im.rows, gray_im.cols, cv.CV_32FC1)
    #                 for (x,y) in cv.GoodFeaturesToTrack(gray_im, eig_image, temp_image, 10, 0.04, 1.0, useHarris = True):
    #                     print "good feature at", x,y
    #                     cv.Rectangle(img, (int(x), int(y)),(int(x) + 20, int(y) + 20), cv.RGB(255, 255, 255))

    #find color of the skin
    #prepare histogram

    eyebrow_Area = cv.GetSubRect(
        originalImage,
        (int(pt1[0] * 1.1), int(pt1[1] * 1.2),
         centerX - pt1[0], int((centerY - pt1[1]) * 0.6)))
    eyebrow_Area2 = cv.CloneMat(eyebrow_Area)
    cv.Smooth(eyebrow_Area2, eyebrow_Area2, cv.CV_GAUSSIAN, 9, 1)

    imageArray = np.asarray(eyebrow_Area2, dtype=np.uint8)
    hsv_image = cv2.cvtColor(imageArray, cv2.COLOR_BGR2HSV)

    #                 histogram2 = hs_histogram(leftEyeArea)
    #                 print(histogram2)
    #                 imageArray2 = np.asarray(histogram2, dtype=np.uint8)
    #                 cv2.imshow("histo " , histogram2)

    #
    #dark = imageArray[...,2] < 32
    #set not frequent to dark
    #imageArray[dark] = 0
    #histogram = cv.CreateHist(2, cv.CV_HIST_ARRAY)
    histogram = cv2.calcHist([hsv_image], [0, 1], None, [180, 256],
                             [0, 180, 0, 256])

    h1 = np.clip(histogram * 0.005 * hist_scale, 0, 1)
    vis = hsv_map * h1[:, :, np.newaxis] / 255.0
    #print type(vis)
    #cv2.imshow('hist', vis)

    #backproj = None
    #cv.CalcBackProject(hsv_image, backproj, histogram)
    ranges = [0, 180, 0, 256]

    backproj = cv2.calcBackProject([hsv_image], [0, 1], histogram, ranges, 10)

    cv2.imshow("back proj ", backproj)
Example #8
    def detect_and_draw(self, imgmsg):
        if self.pause:
            return
        # frame = cv.QueryFrame( self.capture )
        frame = self.br.imgmsg_to_cv(imgmsg, "bgr8")

        # Convert to HSV and keep the hue
        hsv = cv.CreateImage(cv.GetSize(frame), 8, 3)
        cv.CvtColor(frame, hsv, cv.CV_BGR2HSV)
        self.hue = cv.CreateImage(cv.GetSize(frame), 8, 1)
        cv.Split(hsv, self.hue, None, None, None)

        # Compute back projection
        backproject = cv.CreateImage(cv.GetSize(frame), 8, 1)

        # Run the cam-shift
        cv.CalcArrBackProject([self.hue], backproject, self.hist)
        if self.track_window and is_rect_nonzero(self.track_window):
            crit = (cv.CV_TERMCRIT_EPS | cv.CV_TERMCRIT_ITER, 10, 1)
            (iters, (area, value, rect),
             track_box) = cv.CamShift(backproject, self.track_window, crit)
            self.track_window = rect
            x, y, w, h = rect
            self.bbpub.publish(RegionOfInterest(x, y, w, h, False))
            proba_msg = self.br.cv_to_imgmsg(backproject)
            proba_msg.header = imgmsg.header
            self.bppub.publish(proba_msg)

        # If mouse is pressed, highlight the current selected rectangle
        # and recompute the histogram

        if self.drag_start and is_rect_nonzero(self.selection):
            sub = cv.GetSubRect(frame, self.selection)
            save = cv.CloneMat(sub)
            cv.ConvertScale(frame, frame, 0.5)
            cv.Copy(save, sub)
            x, y, w, h = self.selection
            cv.Rectangle(frame, (x, y), (x + w, y + h), (255, 255, 255))

            sel = cv.GetSubRect(self.hue, self.selection)
            cv.CalcArrHist([sel], self.hist, 0)
            (_, max_val, _, _) = cv.GetMinMaxHistValue(self.hist)
            if max_val != 0:
                cv.ConvertScale(self.hist.bins, self.hist.bins, 255. / max_val)
        elif self.track_window and is_rect_nonzero(self.track_window):
            cv.EllipseBox(frame, track_box, cv.CV_RGB(255, 0, 0), 3, cv.CV_AA,
                          0)

        self.frame = frame
        self.backproject = backproject
Example #9
    def undistort_points(self, src):
        """
        :param src: N source pixel points (u,v) as an Nx2 matrix
        :type src: :class:`cvMat`

        Apply the post-calibration undistortion to the source points
        """

        dst = cv.CloneMat(src)
        cv.UndistortPoints(src,
                           dst,
                           self.intrinsics,
                           self.distortion,
                           R=self.R,
                           P=self.P)
        return dst
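
The cv2 counterpart of undistort_points works on an Nx1x2 float array and can map results back to pixel coordinates via the P argument. A sketch; K and D are placeholder intrinsics:

import cv2
import numpy as np

# placeholder intrinsics; in practice these come from calibration
K = np.array([[500.0, 0.0, 320.0], [0.0, 500.0, 240.0], [0.0, 0.0, 1.0]])
D = np.array([-0.2, 0.05, 0.0, 0.0])  # k1, k2, p1, p2

def undistort_points(src):
    # src: Nx2 array of pixel coordinates (u, v)
    pts = np.asarray(src, np.float32).reshape(-1, 1, 2)
    # P=K maps the result back to pixel coordinates instead of normalized ones
    return cv2.undistortPoints(pts, K, D, P=K).reshape(-1, 2)
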
Example #10
    def run(self):

        copy = cv.CloneImage(self.image)

        while True:
            if self.drag_start and is_rect_nonzero(self.selection):
                copy = cv.CloneImage(self.image)
                sub = cv.GetSubRect(copy, self.selection)  #Get specified area

                #Dim the frame to give a background-shadow effect while selecting
                save = cv.CloneMat(sub)

                cv.ConvertScale(copy, copy, 0.5)
                cv.Copy(save, sub)

                #Draw temporary rectangle
                x, y, w, h = self.selection
                cv.Rectangle(copy, (x, y), (x + w, y + h), (255, 255, 255))

            cv.ShowImage("Image", copy)
            c = cv.WaitKey(1)
            if c == 27 or c == 1048603 or c == 10:  #Break on Esc (with or without modifier bits) or Enter
                break
Example #11
    def downsample_and_detect(self, rgb):
        """
        Downsample the input image to approximately VGA resolution and detect the
        calibration target corners in the full-size image.

        Combines these apparently orthogonal duties as an optimization. Checkerboard
        detection is too expensive on large images, so it's better to do detection on
        the smaller display image and scale the corners back up to the correct size.

        Returns (scrib, corners, downsampled_corners, board, (x_scale, y_scale)).
        """
        # Scale the input image down to ~VGA size
        (width, height) = cv.GetSize(rgb)
        scale = math.sqrt((width * height) / (640. * 480.))
        if scale > 1.0:
            scrib = cv.CreateMat(int(height / scale), int(width / scale),
                                 cv.GetElemType(rgb))
            cv.Resize(rgb, scrib)
        else:
            scrib = cv.CloneMat(rgb)
        # Due to rounding, actual horizontal/vertical scaling may differ slightly
        x_scale = float(width) / scrib.cols
        y_scale = float(height) / scrib.rows

        if self.pattern == Patterns.Chessboard:
            # Detect checkerboard
            (ok, downsampled_corners, board) = self.get_corners(scrib,
                                                                refine=True)

            # Scale corners back to full size image
            corners = None
            if ok:
                if scale > 1.0:
                    # Refine up-scaled corners in the original full-res image
                    # TODO Does this really make a difference in practice?
                    corners_unrefined = [(c[0] * x_scale, c[1] * y_scale)
                                         for c in downsampled_corners]
                    # TODO It's silly that this conversion is needed, this function should just work
                    #      on the one-channel mono image
                    mono = cv.CreateMat(rgb.rows, rgb.cols, cv.CV_8UC1)
                    cv.CvtColor(rgb, mono, cv.CV_BGR2GRAY)
                    radius = int(math.ceil(scale))
                    corners = cv.FindCornerSubPix(
                        mono, corners_unrefined, (radius, radius), (-1, -1),
                        (cv.CV_TERMCRIT_EPS + cv.CV_TERMCRIT_ITER, 30, 0.1))
                else:
                    corners = downsampled_corners
        else:
            # Circle grid detection is fast even on large images
            (ok, corners, board) = self.get_corners(rgb)
            # Scale corners to downsampled image for display
            downsampled_corners = None
            if ok:
                #                print corners
                if scale > 1.0:
                    downsampled_corners = [(c[0] / x_scale, c[1] / y_scale)
                                           for c in corners]
                else:
                    downsampled_corners = corners

        return (scrib, corners, downsampled_corners, board, (x_scale, y_scale))
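
The same downsample-then-refine strategy in cv2 form: detect on the small image, scale the corners back up, then refine them with cornerSubPix on the full-resolution mono image. A simplified sketch (a single isotropic scale is used, and the board size is an assumption):

import cv2
import numpy as np

def downsample_and_detect(rgb, board=(8, 6)):
    h, w = rgb.shape[:2]
    scale = max(1.0, np.sqrt((w * h) / (640.0 * 480.0)))
    scrib = cv2.resize(rgb, (int(w / scale), int(h / scale)))
    ok, corners = cv2.findChessboardCorners(scrib, board)
    if ok and scale > 1.0:
        # refine the up-scaled corners in the full-resolution mono image
        mono = cv2.cvtColor(rgb, cv2.COLOR_BGR2GRAY)
        corners = corners * scale
        radius = int(np.ceil(scale))
        crit = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.1)
        corners = cv2.cornerSubPix(mono, corners, (radius, radius), (-1, -1),
                                   crit)
    return ok, corners, scrib
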
Example #12
    def find(self, img):
        started = time.time()
        gray = self.Cached('gray', img.height, img.width, cv.CV_8UC1)
        cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

        sobel = self.Cached('sobel', img.height, img.width, cv.CV_16SC1)
        sobely = self.Cached('sobely', img.height, img.width, cv.CV_16SC1)

        cv.Sobel(gray, sobel, 1, 0)
        cv.Sobel(gray, sobely, 0, 1)
        cv.Add(sobel, sobely, sobel)

        sobel8 = self.Cached('sobel8', sobel.height, sobel.width, cv.CV_8UC1)
        absnorm8(sobel, sobel8)
        cv.Threshold(sobel8, sobel8, 128.0, 255.0, cv.CV_THRESH_BINARY)

        sobel_integral = self.Cached('sobel_integral', img.height + 1,
                                     img.width + 1, cv.CV_32SC1)
        cv.Integral(sobel8, sobel_integral)

        d = 16
        _x1y1 = cv.GetSubRect(
            sobel_integral,
            (0, 0, sobel_integral.cols - d, sobel_integral.rows - d))
        _x1y2 = cv.GetSubRect(
            sobel_integral,
            (0, d, sobel_integral.cols - d, sobel_integral.rows - d))
        _x2y1 = cv.GetSubRect(
            sobel_integral,
            (d, 0, sobel_integral.cols - d, sobel_integral.rows - d))
        _x2y2 = cv.GetSubRect(
            sobel_integral,
            (d, d, sobel_integral.cols - d, sobel_integral.rows - d))

        summation = cv.CloneMat(_x2y2)
        cv.Sub(summation, _x1y2, summation)
        cv.Sub(summation, _x2y1, summation)
        cv.Add(summation, _x1y1, summation)
        sum8 = self.Cached('sum8', summation.height, summation.width,
                           cv.CV_8UC1)
        absnorm8(summation, sum8)
        cv.Threshold(sum8, sum8, 32.0, 255.0, cv.CV_THRESH_BINARY)

        cv.ShowImage("sum8", sum8)
        seq = cv.FindContours(sum8, cv.CreateMemStorage(), cv.CV_RETR_EXTERNAL)
        subimg = cv.GetSubRect(img, (d / 2, d / 2, sum8.cols, sum8.rows))
        t_cull = time.time() - started

        seqs = []
        while seq:
            seqs.append(seq)
            seq = seq.h_next()

        started = time.time()
        found = {}
        print 'seqs', len(seqs)
        for seq in seqs:
            area = cv.ContourArea(seq)
            if area > 1000:
                rect = cv.BoundingRect(seq)
                edge = int((14 / 14.) * math.sqrt(area) / 2 + 0.5)
                candidate = cv.GetSubRect(subimg, rect)
                sym = self.dm.decode(
                    candidate.width,
                    candidate.height,
                    buffer(candidate.tostring()),
                    max_count=1,
                    #min_edge = 6,
                    #max_edge = int(edge)      # Units of 2 pixels
                )
                if sym:
                    onscreen = [(d / 2 + rect[0] + x, d / 2 + rect[1] + y)
                                for (x, y) in self.dm.stats(1)[1]]
                    found[sym] = onscreen
                else:
                    print "FAILED"
        t_brute = time.time() - started
        print "cull took", t_cull, "brute", t_brute
        return found
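
The four shifted GetSubRect views in Example #12 implement the classic integral-image trick: the sum inside a window with corners (x1, y1) and (x2, y2) is I(x2, y2) - I(x1, y2) - I(x2, y1) + I(x1, y1). With cv2.integral and NumPy slicing, the sums for every d x d window come out in one vectorized expression (a sketch):

import cv2
import numpy as np

def box_sums(binary_img, d=16):
    # the integral image of an h x w input is (h+1) x (w+1)
    ii = cv2.integral(binary_img)
    # result[y, x] is the pixel sum of the d x d window whose top-left is (x, y)
    return ii[d:, d:] - ii[:-d, d:] - ii[d:, :-d] + ii[:-d, :-d]
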
Example #13
import cv2.cv as cv
import math

im = cv.LoadImage("../img/build.png", cv.CV_LOAD_IMAGE_GRAYSCALE)
im2 = cv.CloneImage(im)

# GoodFeaturesToTrack algorithm
eigImage = cv.CreateMat(im.height, im.width, cv.CV_32FC1)
tempImage = cv.CloneMat(eigImage)
cornerCount = 500
quality = 0.01
minDistance = 10

corners = cv.GoodFeaturesToTrack(im, eigImage, tempImage, cornerCount, quality,
                                 minDistance)

radius = 3
thickness = 2

for (x, y) in corners:
    cv.Circle(im, (int(x), int(y)), radius, (255, 255, 255), thickness)

cv.ShowImage("GoodfeaturesToTrack", im)

#SURF algorithm
hessthresh = 1500  # 400 500
dsize = 0  # 1
layers = 1  # 3 10

keypoints, descriptors = cv.ExtractSURF(im2, None, cv.CreateMemStorage(),
                                        (dsize, hessthresh, 3, layers))
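
For reference, the cv2 version of Example #13: goodFeaturesToTrack manages the eigenvalue/temporary buffers internally, and SURF now lives in the separately packaged xfeatures2d contrib module (and was long patent-restricted), so ORB is shown below as a freely available substitute rather than a drop-in equivalent:

import cv2

im = cv2.imread('../img/build.png', cv2.IMREAD_GRAYSCALE)
vis = im.copy()

# corner detection; no eigImage/tempImage buffers needed any more
corners = cv2.goodFeaturesToTrack(im, maxCorners=500, qualityLevel=0.01,
                                  minDistance=10)
if corners is not None:
    for c in corners:
        x, y = c.ravel()
        cv2.circle(vis, (int(x), int(y)), 3, 255, 2)
cv2.imshow('GoodFeaturesToTrack', vis)

# keypoints and descriptors with ORB instead of the patented SURF
orb = cv2.ORB_create()
keypoints, descriptors = orb.detectAndCompute(im, None)
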
Example #14
    def run(self):
        hist = cv.CreateHist([180], cv.CV_HIST_ARRAY, [(0, 180)], 1)
        backproject_mode = False
        print "hitting run section"
        x = 0
        while True:
            #print x
            #x = x + 1
            frame = cv.QueryFrame(self.capture)
            cv.Flip(frame, frame, 1)

            # Convert to HSV and keep the hue
            hsv = cv.CreateImage(cv.GetSize(frame), 8, 3)
            cv.CvtColor(frame, hsv, cv.CV_BGR2HSV)
            self.hue = cv.CreateImage(cv.GetSize(frame), 8, 1)
            cv.Split(hsv, self.hue, None, None, None)

            # Compute back projection
            backproject = cv.CreateImage(cv.GetSize(frame), 8, 1)

            # Run the cam-shift
            cv.CalcArrBackProject([self.hue], backproject, hist)
            if self.track_window and is_rect_nonzero(self.track_window):
                crit = (cv.CV_TERMCRIT_EPS | cv.CV_TERMCRIT_ITER, 10, 1)
                print self.track_window
                (iters, (area, value, rect),
                 track_box) = cv.CamShift(backproject, self.track_window, crit)
                self.track_window = rect
                print self.track_window
            try:
                #prints the center x and y value of the tracked ellipse
                coord = track_box[0]
                print "center = {}".format(coord)
                if (coord[0] < 320):
                    print "move right"
                # ser.write("R")
                elif (coord[0] == 320):
                    print "do nothing"
                else:
                    print "move left"
                # ser.write("L")
            except UnboundLocalError:
                print "track_box is None"

            # If mouse is pressed, highlight the current selected rectangle
            # and recompute the histogram

            if self.drag_start and is_rect_nonzero(self.selection):
                sub = cv.GetSubRect(frame, self.selection)
                save = cv.CloneMat(sub)
                cv.ConvertScale(frame, frame, 0.5)
                cv.Copy(save, sub)
                x, y, w, h = self.selection
                cv.Rectangle(frame, (x, y), (x + w, y + h), (255, 255, 255))

                sel = cv.GetSubRect(self.hue, self.selection)
                cv.CalcArrHist([sel], hist, 0)
                (_, max_val, _, _) = cv.GetMinMaxHistValue(hist)
                if max_val != 0:
                    cv.ConvertScale(hist.bins, hist.bins, 255. / max_val)
            elif self.track_window and is_rect_nonzero(self.track_window):
                print track_box
                cv.EllipseBox(frame, track_box, cv.CV_RGB(255, 0, 0), 3,
                              cv.CV_AA, 0)

            if not backproject_mode:
                cv.ShowImage("CamShiftDemo", frame)
            else:
                cv.ShowImage("CamShiftDemo", backproject)
            cv.ShowImage("Histogram", self.hue_histogram_as_image(hist))

            c = cv.WaitKey(7) % 0x100
            if c == 27:
                break
            elif c == ord("b"):
                backproject_mode = not backproject_mode
Example #15
    def iterativeMotionDetector(self, display_image):
        
        print type(display_image)
        
        size = cv.GetSize(display_image)
#         copy_image = cv.CreateImage(size, cv.IPL_DEPTH_8U, 3)
        copy_image = cv.CloneMat(cv.GetMat(display_image))
        
        self.frame_count += 1
        self.frame_t0 = time.time()
        
        print "input type " + str(type(display_image)) 
        print "input type 2" + str(type(self.thumbnail)) 

        cv.Resize(copy_image, self.thumbnail)
        
        cv.ShowImage("input image 2" , self.thumbnail)
        
        color_image = self.thumbnail  # cv.CloneImage(display_image)

        display_image = color_image

        # Smooth to get rid of false positives
        cv.Smooth(color_image, color_image, cv.CV_GAUSSIAN, 19, 0)
        
        # Use the Running Average as the static background            
        # a = 0.020 leaves artifacts lingering way too long.
        # a = 0.320 works well at 320x240, 15fps.  (1/a is roughly num frames.)
        cv.RunningAvg(color_image, self.running_average_image, 0.320, None)

#         cv.ShowImage("background", self.running_average_image)

        # Convert the scale of the moving average.
        cv.ConvertScale(self.running_average_image, self.running_average_in_display_color_depth, 1.0, 0.0)
        
        cv.CvtColor(color_image, self.grey_original_image, cv.CV_RGB2GRAY)
        grey_image_array = np.asarray(cv.GetMat(self.grey_original_image), np.uint8)
        cv.CvtColor(self.running_average_in_display_color_depth, self.grey_average_image, cv.CV_RGB2GRAY)
        running_image_array = np.asarray(cv.GetMat(self.grey_average_image), np.uint8)

        
        # Subtract the current frame from the moving average.
        cv.AbsDiff(color_image, self.running_average_in_display_color_depth, self.difference)
        print cv.GetSize(color_image)
        print cv.GetSize(self.running_average_in_display_color_depth)
        
        cv.ShowImage("difference ", self.difference)
         
        # Convert the image to greyscale.
        cv.CvtColor(self.difference, self.grey_image, cv.CV_RGB2GRAY)

        # Threshold the image to a black and white motion mask:
        cv.Threshold(self.grey_image, self.grey_image, 2, 255, cv.CV_THRESH_BINARY)
        # Smooth and threshold again to eliminate "sparkles"
        cv.Smooth(self.grey_image, self.grey_image, cv.CV_GAUSSIAN, 19, 0)
         
        cv.Threshold(self.grey_image, self.grey_image, 240, 255, cv.CV_THRESH_BINARY)
        
        cv.ShowImage("binary mask", self.grey_image)
        
        
        self.opticalFlow.makeOpticalFlow(np.asarray(copy_image, np.uint8), self.grey_image)

        grey_image_as_array = np.asarray(cv.GetMat(self.grey_image))
        non_black_coords_array = np.where(grey_image_as_array > 3)
        # Convert from numpy.where()'s two separate lists to one list of (x, y) tuples:
        non_black_coords_array = zip(non_black_coords_array[1], non_black_coords_array[0])
         
        points = []  # Was using this to hold either pixel coords or polygon coords.
        bounding_box_list = []

        # Now calculate movements using the white pixels as "motion" data
        contour = cv.FindContours(self.grey_image, self.mem_storage, cv.CV_RETR_CCOMP, cv.CV_CHAIN_APPROX_SIMPLE)
         
        levels = 10
        while contour:
             
            bounding_rect = cv.BoundingRect(list(contour))
            point1 = (bounding_rect[0], bounding_rect[1])
            point2 = (bounding_rect[0] + bounding_rect[2], bounding_rect[1] + bounding_rect[3])
             
            bounding_box_list.append((point1, point2))
            polygon_points = cv.ApproxPoly(list(contour), self.mem_storage, cv.CV_POLY_APPROX_DP)
             
            # To track polygon points only (instead of every pixel):
            # points += list(polygon_points)
             
            # Draw the contours:
            cv.DrawContours(color_image, contour, cv.CV_RGB(255, 0, 0), cv.CV_RGB(0, 255, 0), levels, 3, 0, (0, 0))
            cv.FillPoly(self.grey_image, [ list(polygon_points), ], cv.CV_RGB(255, 255, 255), 0, 0)
            cv.PolyLine(display_image, [ polygon_points, ], 0, cv.CV_RGB(255, 255, 255), 1, 0, 0)
            # cv.Rectangle( display_image, point1, point2, cv.CV_RGB(120,120,120), 1)
 
            contour = contour.h_next()
         
         
        # Find the average size of the bbox (targets), then
        # remove any tiny bboxes (which are prolly just noise).
        # "Tiny" is defined as any box with 1/10th the area of the average box.
        # This reduces false positives on tiny "sparkles" noise.
        box_areas = []
        for box in bounding_box_list:
            box_width = box[right][0] - box[left][0]
            box_height = box[bottom][1] - box[top][1]
            box_areas.append(box_width * box_height)
             
            # cv.Rectangle( display_image, box[0], box[1], cv.CV_RGB(255,0,0), 1)
         
        average_box_area = 0.0
        if len(box_areas): average_box_area = float(sum(box_areas)) / len(box_areas)
         
        trimmed_box_list = []
        for box in bounding_box_list:
            box_width = box[right][0] - box[left][0]
            box_height = box[bottom][1] - box[top][1]
             
            # Only keep the box if it's not a tiny noise box:
            if (box_width * box_height) > average_box_area * 0.1: trimmed_box_list.append(box)
         
        # Draw the trimmed box list:
        # for box in trimmed_box_list:
        #    cv.Rectangle( display_image, box[0], box[1], cv.CV_RGB(0,255,0), 2 )
             
        bounding_box_list = merge_collided_bboxes(trimmed_box_list)
 
        # Draw the merged box list:
        for box in bounding_box_list:
            cv.Rectangle(display_image, box[0], box[1], cv.CV_RGB(0, 255, 0), 1)
         
        # Here are our estimate points to track, based on merged & trimmed boxes:
        estimated_target_count = len(bounding_box_list)
         
        # Don't allow target "jumps" from few to many or many to few.
        # Only change the number of targets up to one target per n seconds.
        # This fixes the "exploding number of targets" when something stops moving
        # and the motion erodes to disparate little puddles all over the place.
         
        if self.frame_t0 - self.last_target_change_t < .350:  # 1 change per 0.35 secs
            estimated_target_count = self.last_target_count
        else:
            if self.last_target_count - estimated_target_count > 1: estimated_target_count = self.last_target_count - 1
            if estimated_target_count - self.last_target_count > 1: estimated_target_count = self.last_target_count + 1
            self.last_target_change_t = self.frame_t0
         
        # Clip to the user-supplied maximum:
        estimated_target_count = min(estimated_target_count, self.max_targets)
         
        # The estimated_target_count at this point is the maximum number of targets
        # we want to look for.  If kmeans decides that one of our candidate
        # bboxes is not actually a target, we remove it from the target list below.
         
        # Using the numpy values directly (treating all pixels as points):    
        points = non_black_coords_array
        center_points = []
         
        if len(points):
             
            # If we have all the "target_count" targets from last frame,
            # use the previously known targets (for greater accuracy).
            k_or_guess = max(estimated_target_count, 1)  # Need at least one target to look for.
            if len(self.codebook) == estimated_target_count: 
                k_or_guess = self.codebook
             
            # points = vq.whiten(array( points ))  # Don't do this!  Ruins everything.
            self.codebook, distortion = vq.kmeans(array(points), k_or_guess)
             
            # Convert to tuples (and draw it to screen)
            for center_point in self.codebook:
                center_point = (int(center_point[0]), int(center_point[1]))
                center_points.append(center_point)
                # cv.Circle(display_image, center_point, 10, cv.CV_RGB(255, 0, 0), 2)
                # cv.Circle(display_image, center_point, 5, cv.CV_RGB(255, 0, 0), 3)
         
        # Now we have targets that are NOT computed from bboxes -- just
        # movement weights (according to kmeans).  If any two targets are
        # within the same "bbox count", average them into a single target.  
        #
        # (Any kmeans targets not within a bbox are also kept.)
        trimmed_center_points = []
        removed_center_points = []
                     
        for box in bounding_box_list:
            # Find the centers within this box:
            center_points_in_box = []
             
            for center_point in center_points:
                if box[left][0] < center_point[0] < box[right][0] and \
                   box[top][1] < center_point[1] < box[bottom][1]:
                     
                    # This point is within the box.
                    center_points_in_box.append(center_point)
             
            # Now see if there are more than one.  If so, merge them.
            if len(center_points_in_box) > 1:
                # Merge them:
                x_list = []
                y_list = []
                for point in center_points_in_box:
                    x_list.append(point[0])
                    y_list.append(point[1])
                 
                average_x = int(float(sum(x_list)) / len(x_list))
                average_y = int(float(sum(y_list)) / len(y_list))
                 
                trimmed_center_points.append((average_x, average_y))
                 
                # Record that they were removed:
                removed_center_points += center_points_in_box
                 
            if len(center_points_in_box) == 1:
                trimmed_center_points.append(center_points_in_box[0])  # Just use it.
         
        # If there are any center_points not within a bbox, just use them.
        # (It's probably a cluster comprised of a bunch of small bboxes.)
        for center_point in center_points:
            if (not center_point in trimmed_center_points) and (not center_point in removed_center_points):
                trimmed_center_points.append(center_point)
         
        # Draw what we found:
        # for center_point in trimmed_center_points:
        #    center_point = ( int(center_point[0]), int(center_point[1]) )
        #    cv.Circle(display_image, center_point, 20, cv.CV_RGB(255, 255,255), 1)
        #    cv.Circle(display_image, center_point, 15, cv.CV_RGB(100, 255, 255), 1)
        #    cv.Circle(display_image, center_point, 10, cv.CV_RGB(255, 255, 255), 2)
        #    cv.Circle(display_image, center_point, 5, cv.CV_RGB(100, 255, 255), 3)
         
        # Determine if there are any new (or lost) targets:
        actual_target_count = len(trimmed_center_points)
        self.last_target_count = actual_target_count
         
        # Now build the list of physical entities (objects)
        this_frame_entity_list = []
         
        # An entity is list: [ name, color, last_time_seen, last_known_coords ]
         
        for target in trimmed_center_points:
         
            # Is this a target near a prior entity (same physical entity)?
            entity_found = False
            entity_distance_dict = {}
             
            for entity in self.last_frame_entity_list:
                 
                entity_coords = entity[3]
                delta_x = entity_coords[0] - target[0]
                delta_y = entity_coords[1] - target[1]
         
                distance = sqrt(pow(delta_x, 2) + pow(delta_y, 2))
                entity_distance_dict[ distance ] = entity
             
            # Did we find any non-claimed entities (nearest to furthest):
            distance_list = entity_distance_dict.keys()
            distance_list.sort()
             
            for distance in distance_list:
                 
                # Yes; see if we can claim the nearest one:
                nearest_possible_entity = entity_distance_dict[ distance ]
                 
                # Don't consider entities that are already claimed:
                if nearest_possible_entity in this_frame_entity_list:
                    # print "Target %s: Skipping the one iwth distance: %d at %s, C:%s" % (target, distance, nearest_possible_entity[3], nearest_possible_entity[1] )
                    continue
                 
                # print "Target %s: USING the one iwth distance: %d at %s, C:%s" % (target, distance, nearest_possible_entity[3] , nearest_possible_entity[1])
                # Found the nearest entity to claim:
                entity_found = True
                nearest_possible_entity[2] = self.frame_t0  # Update last_time_seen
                nearest_possible_entity[3] = target  # Update the new location
                this_frame_entity_list.append(nearest_possible_entity)
                # log_file.write( "%.3f MOVED %s %d %d\n" % ( frame_t0, nearest_possible_entity[0], nearest_possible_entity[3][0], nearest_possible_entity[3][1]  ) )
                break
             
            if entity_found == False:
                # It's a new entity.
                color = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
                name = hashlib.md5(str(self.frame_t0) + str(color)).hexdigest()[:6]
                last_time_seen = self.frame_t0
                 
                new_entity = [ name, color, last_time_seen, target ]
                this_frame_entity_list.append(new_entity)
                # log_file.write( "%.3f FOUND %s %d %d\n" % ( frame_t0, new_entity[0], new_entity[3][0], new_entity[3][1]  ) )
         
        # Now "delete" any not-found entities which have expired:
        entity_ttl = 1.0  # 1 sec.
         
        for entity in self.last_frame_entity_list:
            last_time_seen = entity[2]
            if self.frame_t0 - last_time_seen > entity_ttl:
                # It's gone.
                # log_file.write( "%.3f STOPD %s %d %d\n" % ( frame_t0, entity[0], entity[3][0], entity[3][1]  ) )
                pass
            else:
                # Save it for next time... not expired yet:
                this_frame_entity_list.append(entity)
         
        # For next frame:
        self.last_frame_entity_list = this_frame_entity_list
         
        # Draw the found entities to screen:
        for entity in this_frame_entity_list:
            center_point = entity[3]
            c = entity[1]  # RGB color tuple
            cv.Circle(display_image, center_point, 20, cv.CV_RGB(c[0], c[1], c[2]), 1)
            cv.Circle(display_image, center_point, 15, cv.CV_RGB(c[0], c[1], c[2]), 1)
            cv.Circle(display_image, center_point, 10, cv.CV_RGB(c[0], c[1], c[2]), 2)
            cv.Circle(display_image, center_point, 5, cv.CV_RGB(c[0], c[1], c[2]), 3)

        # Toggle which image to show
#             if chr(c) == 'd':
#                 image_index = ( image_index + 1 ) % len( image_list )
#             
#             image_name = image_list[ image_index ]
#             
#             # Display frame to user
#             if image_name == "camera":
#                 image = camera_image
#                 cv.PutText( image, "Camera (Normal)", text_coord, text_font, text_color )
#             elif image_name == "difference":
#                 image = difference
#                 cv.PutText( image, "Difference Image", text_coord, text_font, text_color )
#             elif image_name == "display":
#                 image = display_image
#                 cv.PutText( image, "Targets (w/AABBs and contours)", text_coord, text_font, text_color )
#             elif image_name == "threshold":
#                 # Convert the image to color.
#                 cv.CvtColor( grey_image, display_image, cv.CV_GRAY2RGB )
#                 image = display_image  # Re-use display image here
#                 cv.PutText( image, "Motion Mask", text_coord, text_font, text_color )
#             elif image_name == "faces":
#                 # Do face detection
#                 detect_faces( camera_image, haar_cascade, mem_storage )                
#                 image = camera_image  # Re-use camera image here
#                 cv.PutText( image, "Face Detection", text_coord, text_font, text_color )
#             cv.ShowImage( "Target", image )

        image1 = display_image
        cv.ShowImage("Target 1", image1)
         
         
#             if self.writer: 
#                 cv.WriteFrame( self.writer, image );
         
        # log_file.flush()
         
        # If only using a camera, then there is no time.sleep() needed, 
        # because the camera clips us to 15 fps.  But if reading from a file,
        # we need this to keep the time-based target clipping correct:
        frame_t1 = time.time()
         
 
        # If reading from a file, put in a forced delay:
        if not self.writer:
            delta_t = frame_t1 - self.frame_t0
            if delta_t < (1.0 / 15.0): time.sleep((1.0 / 15.0) - delta_t)
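
The clustering step buried in Example #15 is just scipy.cluster.vq.kmeans over the coordinates of the white motion-mask pixels. Isolated, with a synthetic two-blob mask, it looks like this (a sketch):

import numpy as np
from scipy.cluster import vq

# synthetic binary motion mask with two moving blobs
mask = np.zeros((240, 320), np.uint8)
mask[50:80, 40:70] = 255
mask[150:190, 200:260] = 255

ys, xs = np.where(mask > 0)
points = np.column_stack((xs, ys)).astype(float)

# cluster the motion pixels into k candidate target centers
codebook, distortion = vq.kmeans(points, 2)
centers = [(int(cx), int(cy)) for (cx, cy) in codebook]
print centers
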
Example #16
    def run(self):
        hist = cv.CreateHist([180], cv.CV_HIST_ARRAY, [(0, 180)], 1)
        backproject_mode = False
        i = 1
        o_x = 0
        o_y = 0
        while True:
            frame = cv.QueryFrame(self.capture)

            # Convert to HSV and keep the hue
            hsv = cv.CreateImage(cv.GetSize(frame), 8, 3)
            cv.CvtColor(frame, hsv, cv.CV_BGR2HSV)
            self.hue = cv.CreateImage(cv.GetSize(frame), 8, 1)
            cv.Split(hsv, self.hue, None, None, None)

            # Compute back projection
            backproject = cv.CreateImage(cv.GetSize(frame), 8, 1)

            # Run the cam-shift
            cv.CalcArrBackProject([self.hue], backproject, hist)
            if self.track_window and is_rect_nonzero(self.track_window):
                crit = (cv.CV_TERMCRIT_EPS | cv.CV_TERMCRIT_ITER, 10, 1)
                (iters, (area, value, rect),
                 track_box) = cv.CamShift(backproject, self.track_window, crit)
                self.track_window = rect

            # If mouse is pressed, highlight the current selected rectangle
            # and recompute the histogram

            if self.drag_start and is_rect_nonzero(self.selection):
                sub = cv.GetSubRect(frame, self.selection)
                save = cv.CloneMat(sub)
                cv.ConvertScale(frame, frame, 0.5)
                cv.Copy(save, sub)
                x, y, w, h = self.selection
                cv.Rectangle(frame, (x, y), (x + w, y + h), (255, 255, 255))

                sel = cv.GetSubRect(self.hue, self.selection)
                cv.CalcArrHist([sel], hist, 0)
                (_, max_val, _, _) = cv.GetMinMaxHistValue(hist)
                if max_val != 0:
                    cv.ConvertScale(hist.bins, hist.bins, 255. / max_val)
            elif self.track_window and is_rect_nonzero(self.track_window):
                cv.EllipseBox(frame, track_box, cv.CV_RGB(255, 0, 0), 3,
                              cv.CV_AA, 0)
                #print track_box
                trace_val = track_box[0]
                f_x = trace_val[0]
                f_y = trace_val[1]
                print 'value1', f_x
                print 'value2', f_y
                if i % 10 == 0:
                    o_x = f_x
                    o_y = f_y
                if f_x != o_x:
                    a = round((f_x - o_x) / 10.0)
                    cam.Azimuth(-a)
                if f_y != o_y:
                    a = round((f_y - o_y) / 10.0)
                    cam.Elevation(-a)
                ren1.ResetCameraClippingRange()
                renWin.Render()
                i += 1

            if not backproject_mode:
                cv.ShowImage("CamShiftDemo", frame)
            else:
                cv.ShowImage("CamShiftDemo", backproject)
            cv.ShowImage("Histogram", self.hue_histogram_as_image(hist))

            c = cv.WaitKey(7) % 0x100
            if c == 27:
                break
            elif c == ord("b"):
                backproject_mode = not backproject_mode
Example #17
ipts = mk_image_points(goodcorners)
opts = mk_object_points(len(goodcorners), .1)
npts = mk_point_counts(len(goodcorners))

intrinsics = cv.CreateMat(3, 3, cv.CV_64FC1)
distortion = cv.CreateMat(4, 1, cv.CV_64FC1)
cv.SetZero(intrinsics)
cv.SetZero(distortion)
# focal lengths have 1/1 ratio
intrinsics[0, 0] = 1.0
intrinsics[1, 1] = 1.0
cv.CalibrateCamera2(opts,
                    ipts,
                    npts,
                    cv.GetSize(images[0]),
                    intrinsics,
                    distortion,
                    cv.CreateMat(len(goodcorners), 3, cv.CV_32FC1),
                    cv.CreateMat(len(goodcorners), 3, cv.CV_32FC1),
                    flags=0)  # cv.CV_CALIB_ZERO_TANGENT_DIST)
print "D =", list(cvmat_iterator(distortion))
print "K =", list(cvmat_iterator(intrinsics))
mapx = cv.CreateImage((640, 480), cv.IPL_DEPTH_32F, 1)
mapy = cv.CreateImage((640, 480), cv.IPL_DEPTH_32F, 1)
cv.InitUndistortMap(intrinsics, distortion, mapx, mapy)
for img in images:
    r = cv.CloneMat(img)
    cv.Remap(img, r, mapx, mapy)
    cv.ShowImage("snap", r)
    cv.WaitKey()
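
The cv2 version of this calibrate-and-undistort loop returns the matrices instead of filling preallocated CvMats. A sketch, assuming opts, ipts, and images are prepared as in the snippet above:

import cv2

def calibrate_and_show(opts, ipts, images, size=(640, 480)):
    # opts: list of Nx3 float32 object-point arrays; ipts: matching list of
    # Nx2 float32 image-point arrays, one pair per calibration view
    rms, K, D, rvecs, tvecs = cv2.calibrateCamera(opts, ipts, size, None, None)
    print "D =", D.ravel()
    print "K =", K
    map1, map2 = cv2.initUndistortRectifyMap(K, D, None, K, size, cv2.CV_16SC2)
    for img in images:
        cv2.imshow('snap', cv2.remap(img, map1, map2, cv2.INTER_LINEAR))
        cv2.waitKey()
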
Example #18
              math.pi * deg / 180, r) for deg in range(0, 90, 10)]

for (msg, (x, y), angle, r) in test:
    map = cv.CreateMat(2, 3, cv.CV_32FC1)
    corners = [(x + r * math.cos(angle + th), y + r * math.sin(angle + th))
               for th in [0, math.pi / 2, math.pi, 3 * math.pi / 4]]
    src = mkdmtx(msg)
    (sx, sy) = cv.GetSize(src)
    cv.GetAffineTransform([(0, 0), (sx, 0), (sx, sy)], corners[:3], map)
    temp = cv.CreateMat(bg.rows, bg.cols, cv.CV_8UC3)
    cv.Set(temp, cv.RGB(0, 0, 0))
    cv.WarpAffine(src, temp, map)
    cv.Or(temp, bg, bg)

cv.ShowImage("comp", bg)
scribble = cv.CloneMat(bg)

if 0:
    for i in range(10):
        df.find(bg)

for (sym, coords) in df.find(bg).items():
    print sym
    cv.PolyLine(scribble, [coords],
                1,
                cv.CV_RGB(255, 0, 0),
                1,
                lineType=cv.CV_AA)
    Xs = [x for (x, y) in coords]
    Ys = [y for (x, y) in coords]
    where = ((min(Xs) + max(Xs)) / 2, max(Ys) - 50)
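
The placement step of Example #18 in cv2 form: an affine map from three source corners to three destination corners, then a warp OR-ed onto the background. The source patch and corner coordinates below are synthetic stand-ins:

import cv2
import numpy as np

src = np.full((80, 80), 255, np.uint8)  # stand-in for the rendered patch
bg = np.zeros((480, 640), np.uint8)

sy, sx = src.shape
src_tri = np.float32([(0, 0), (sx, 0), (sx, sy)])
dst_tri = np.float32([(100, 120), (180, 100), (200, 180)])  # assumed corners

M = cv2.getAffineTransform(src_tri, dst_tri)
warped = cv2.warpAffine(src, M, (bg.shape[1], bg.shape[0]))
bg = cv2.bitwise_or(bg, warped)
cv2.imshow('comp', bg)
cv2.waitKey()
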
Example #19
    def detect_and_draw(self, originalImage):
        # allocate temporary images
        
        print type(originalImage)
        grayScaleFullImage = cv.CreateImage((originalImage.width, originalImage.height), 8, 1)
        smallScaleFullImage = cv.CreateImage((cv.Round(originalImage.width / image_scale),
                                              cv.Round(originalImage.height / image_scale)), 8, 1)
    
        # convert color input image to grayscale
        cv.CvtColor(originalImage, grayScaleFullImage, cv.CV_BGR2GRAY)
    
        # scale input image for faster processing
        cv.Resize(grayScaleFullImage, smallScaleFullImage, cv.CV_INTER_LINEAR)
    
        cv.EqualizeHist(smallScaleFullImage, smallScaleFullImage)
    
        if(self.cascade):
            t = cv.GetTickCount()
            # detect faces
            faces = cv.HaarDetectObjects(smallScaleFullImage, self.cascade, cv.CreateMemStorage(0),
                                         haar_scale, min_neighbors, haar_flags, min_size)
            t = cv.GetTickCount() - t
            print "detection time = %gms" % (t / (cv.GetTickFrequency() * 1000.))
            if faces:
                print "detected face"
                for ((x, y, w, h), n) in faces:
                    # the input to cv.HaarDetectObjects was resized, so scale the
                    # bounding box of each face and convert it to two CvPoints
                    pt1 = (int(x * image_scale), int(y * image_scale))
                    pt11 = (int(x * image_scale) + 10, int(y * image_scale) + 10)
                    pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
                    # face 
                    cv.Rectangle(originalImage, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)
                    
                    if isOpticalFlow:
                        originalArray2 = cv.CloneImage(originalImage)
                        faceArea = cv.GetSubRect(originalArray2, (pt1[0], pt1[1], pt2[0] - pt1[0], pt2[1] - pt1[1]))
                        faceArea2 = cv.CloneMat(faceArea)
                        cv.ShowImage("face area", faceArea2)
                        self.MotionDetector.iterativeMotionDetector(faceArea2)

                    # get the center of the rectangle
                    centerX = (pt1[0] + pt2[0]) / 2     
                    centerY = (pt1[1] + pt2[1]) / 2 + int(0.1 * w * image_scale)
                      
                    # around nose region
                    cv.Rectangle(originalImage, (centerX, centerY), (centerX + 10, centerY + 10), cv.RGB(255, 0, 255))

                    # detect left eye
                    # cv.SetZero(sub)  55
                    self.detectLeftEye(originalImage, self.cascade2, pt1, centerX, centerY)
                    
                    # detect right eye
                    rightEyeArea = cv.GetSubRect(originalImage, (centerX, pt1[1], pt2[0] - centerX  , centerY - pt1[1]))
                    # cv.SetZero(rightEyeArea)    
                    self.detectRightEye(originalImage, rightEyeArea, centerX, centerY, pt1, self.cascade2)
                    
#                     self.detectNose(originalImage, cascade4, centerX, centerY)

                    # now apply mask for values in range +/- 10% of index_1
                    # form a map for showing the eyebrows
                    # cloneImageArray = cv.CloneMat(imageArray)
                    # cloneImageArray = np.empty_like (imageArray)
                    # cloneImageArray[:] = imageArray
                    # cv2.imshow("left eye " ,cloneImageArray)
                
                    # res = cv2.bitwise_and(cloneImageArray,cloneImageArray,mask = backproj)
                    # cv2.imshow("res" ,res)
                
                
                    # detect left eyebrow
                    # by doing simple contour detection
    #                 print type(leftEyeArea)
    #                 gray_im = cv.CreateMat(leftEyeArea.height, leftEyeArea.width, cv.CV_8UC1)
    #                 #gray_im = cv.CreateImage((leftEyeArea.rows, leftEyeArea.cols), cv.IPL_DEPTH_8U, 1)
    #                 print type(gray_im)
    #                 cv.CvtColor(leftEyeArea, gray_im, cv.CV_RGB2GRAY)
    #                 imageArray = np.asarray(gray_im, dtype=np.uint8)
    #                 #floatMat.convertTo(ucharMat, CV_8UC1);
    # 
    #                 # scale values from 0..1 to 0..255
    #                 #floatMat.convertTo(ucharMatScaled, CV_8UC1, 255, 0); 
    #                 contours0, hier = cv2.findContours( backproj , cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # #               
    # 
    #                 cv_im = cv.CreateMat(img.width, img.height, cv.CV_8UC3)
    #                 cv.SetData(cv_im, img.tostring())
    #                     
    #                 #print type(cv_im)
    #                 
    #                 originalImageArray = np.asarray(cv_im, dtype=np.uint8)
    #                 
    #                 print " length " + str(len(contours0))   
    #                 #print type(contours0)
    #                 
    #                 lines = None
    #                 linesList = list()
    #                 for item in contours0:
    #                     #print "item " + str(item)
    #                        
    #                     #print type(item)
    #                     for i in range(1, len(item)):
    #                         #for j in range(len(item[i][0])):
    #                         #print str(item[i][0][0]) + " " + str(item[i][0][1])
    #                         #lines.append([[item[i][0][0], item[i][0][1]]])
    #                         if lines != None:
    #                             np.append(lines, item[i][0])
    #                         else:
    #                             lines = np.array(item[i][0])
    #                         linesList.append((item[i][0][0] , item[i][0][1]))
    #                         #cv2.circle(backproj, ( item[i][0][0] , item[i][0][1]), 10, (255,255,255), 10)
    #                         #cv.Circle(img, (pt1[0] + item[i][0][0] ,int(pt1[1] * 1.1)+ item[i][0][1]), 5, (255,0,255))
    #                             
    #                             
    #                
    #                 
    #                 #print type(originalImageArray)
    #                 print lines
    #                 #cv2.polylines(originalImageArray, lines, True, cv.RGB(255, 255, 0), 10)
    #                 print type(linesList)
    #                 #cv.PolyLine(cv_im, linesList, False, cv.RGB(255, 255, 0), 10)
    #                 #cv2.drawContours(backproj, contours0, , cv.RGB(55, 55, 55))
                
                    
                    
                    # canny_output = None
                    # canny_output = cv2.Canny(backproj, 700, 1000, canny_output, 7)
                    # cv2.imshow("canny ", canny_output)
                    
                    # cv.Canny(hsv_image, contours0, 10, 60);
                    # contours, hier = cv2.findContours( canny_output , cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

                    # cv2.drawContours(originalImageArray, lines, -1, (0, 255, 0), 3)

                    # detect mouth
                    mouthArea = cv.GetSubRect(originalImage, (pt1[0], centerY, pt2[0] - pt1[0], pt2[1] - centerY))
                    self.detectMouth(originalImage, mouthArea, pt1, centerY, self.cascade3)

                    # start tracking face
                    if not isOpticalFlow:
                        originalArray2 = cv.CloneImage(originalImage)
                        faceArea = cv.GetSubRect(originalArray2, (pt1[0], pt1[1], pt2[0] - pt1[0], pt2[1] - pt1[1]))
                        faceArea2 = cv.CloneMat(faceArea)
                        return (True, faceArea2, originalImage, pt1, pt2)
                        
#                         originalImage2 = cv.CloneImage(originalImage)
#                         camshift = Camshift()
#                         camshift.defineRegionOfInterest(originalImage2, pt1, pt2)

#                         originalArray2 = cv.CloneImage(originalImage)
#                         faceArea = cv.GetSubRect(originalArray2, (pt1[0], pt1[1], pt2[0] - pt1[0], pt2[1] - pt1[1]))
#                         faceArea2 = cv.CloneMat(faceArea)
#                         cv.ShowImage("face area", faceArea2)
#                         faceArray = np.asarray(faceArea2, np.uint8, 3)
#                         faceArray = cv2.cvtColor(faceArray, cv2.COLOR_BGR2GRAY)
#                         self.matcher.defineTargetImage(faceArray)
#                         self.matcher.findInVideoSequence()

        cv.ShowImage("result", originalImage)
        
        return (False, originalImage, None, None, None)
Example #20
    def store_proba(self, proba):
        # print "Got Image"
        if not self.info:
            return
        # print "Processing"
        self.timestamp = proba.header.stamp
        I = self.br.imgmsg_to_cv(proba, "8UC1")
        self.proba = cv.CloneMat(I)
        cv.Threshold(I, self.proba, 0xFE, 0xFE, cv.CV_THRESH_TRUNC)
        try:
            # (trans,rot) = self.listener.lookupTransform(proba.header.frame_id, '/world', proba.header.stamp)
            self.listener.waitForTransform(proba.header.frame_id,
                                           self.target_frame,
                                           proba.header.stamp,
                                           rospy.Duration(1.0))
            trans = numpy.mat(
                self.listener.asMatrix(self.target_frame, proba.header))
            # print "Transformation"
            # print trans
            dstdir = [trans * v for v in self.dirpts3d]
            # print "Destination dir"
            # print dstdir
            origin = trans * self.origin
            origin = origin / origin[3, 0]
            # origin = numpy.matrix([0.0, 0.0, origin[2,0] / origin[3,0], 1.0]).T
            # print "Origin"
            # print origin

            self.dstpts2d = cv.CreateMat(4, 2, cv.CV_32F)
            for i in range(4):
                self.dstpts2d[i, 0] = self.x_floor + (origin[0, 0] - dstdir[i][
                    0, 0] * origin[2, 0] / dstdir[i][2, 0]) * self.floor_scale
                self.dstpts2d[i, 1] = self.y_floor - (origin[1, 0] - dstdir[i][
                    1, 0] * origin[2, 0] / dstdir[i][2, 0]) * self.floor_scale
            # print numpy.asarray(self.dstpts2d)

            # print "Source points"
            # print numpy.asarray(self.srcpts2d)
            # print "Dest points"
            # print numpy.asarray(self.dstpts2d)
            self.H = cv.CreateMat(3, 3, cv.CV_32F)
            cv.FindHomography(self.srcpts2d, self.dstpts2d, self.H)
            # print "Homography"
            # print numpy.asarray(self.H)

            cv.WarpPerspective(cv.GetSubRect(
                self.proba, (0, self.horizon_offset, self.proba.width,
                             self.proba.height - self.horizon_offset)),
                               self.floor_map,
                               self.H,
                               flags=cv.CV_INTER_NN + cv.CV_WARP_FILL_OUTLIERS,
                               fillval=0xFF)

            msg = self.br.cv_to_imgmsg(self.floor_map)
            msg.header.stamp = proba.header.stamp
            msg.header.frame_id = self.target_frame
            self.pub.publish(msg)
            # print "Publishing image"

        except (tf.LookupException, tf.ConnectivityException,
                tf.ExtrapolationException):
            print "Exception while looking for transform"
            return
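
The geometric core of store_proba, reduced to cv2 calls: fit a homography from four source points to four destination points and warp with nearest-neighbour interpolation, filling outliers with 0xFF as the snippet does. The quadrilaterals here are invented stand-ins for the TF-derived ones:

import cv2
import numpy as np

srcpts = np.float32([[0, 0], [639, 0], [639, 479], [0, 479]])
dstpts = np.float32([[50, 50], [430, 40], [460, 420], [40, 430]])  # assumed

H, _ = cv2.findHomography(srcpts, dstpts)

proba = np.zeros((480, 640), np.uint8)  # stand-in for the probability image
floor_map = cv2.warpPerspective(proba, H, (500, 500),
                                flags=cv2.INTER_NEAREST,
                                borderMode=cv2.BORDER_CONSTANT,
                                borderValue=0xFF)
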
Example #21
    def findObjectOfInterest(self, image):
        self.vis = image.copy()
        hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(hsv, np.array((0., 60., 32.)), np.array((180., 255., 255.)))

#         if self.selection:
#             print "going 1"
#             x0, y0, x1, y1 = self.selection
#             self.track_window = (x0, y0, x1 - x0, y1 - y0)
#             hsv_roi = hsv[y0:y1, x0:x1]
#             mask_roi = mask[y0:y1, x0:x1]
#             hist = cv2.calcHist([hsv_roi], [0], mask_roi, [16], [0, 180])
#             cv2.normalize(hist, hist, 0, 255, cv2.NORM_MINMAX);
#             self.hist = hist.reshape(-1)
#             self.show_hist()
# 
#             vis_roi = vis[y0:y1, x0:x1]
#             cv2.bitwise_not(vis_roi, vis_roi)
#             vis[mask == 0] = 0

#         if self.tracking_state == 1:
#         print "going 2"
        self.selection = None
        prob = cv2.calcBackProject([hsv], [0], self.hist, [0, 180], 1)
        prob &= mask
        term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)
#         print "input tracking window " + str(self.track_window)
        if self.track_window[2] == 0 or self.track_window[3] == 0:
            # fall back to detection
            print "returning"
            self.returnToDetection = True
            return
        else:
            track_box, self.track_window = cv2.CamShift(prob, self.track_window, term_crit)
    
            if self.show_backproj:
                self.vis[:] = prob[..., np.newaxis]

            width = track_box[1][0]
            height = track_box[1][1]
            center = (int(track_box[0][0]), int(track_box[0][1]))
            angle = track_box[2]
#             cv2.rectangle(self.vis, point1, point2, (0, 255, 0), 2)            
#             cv2.ellipse(self.vis, track_box, (0, 0, 255), 2)
#             cv2.putText(self.vis, str(angle), (100, 100), cv2.FONT_HERSHEY_SCRIPT_SIMPLEX, 1, (255, 255, 0))
#             cv2.line(self.vis, center, extrema, (0, 0, 255), 3)
            
            print "track box " + str(track_box)
    
            originalArray2 = cv.CloneMat(cv.fromarray(self.vis))
            shape = np.shape(originalArray2)           
            if angle < 180:
                rotateAngle = angle + 360
            else:
                rotateAngle = 180 + angle
            
            rotationMatrix = cv2.getRotationMatrix2D(center, rotateAngle , 1)
            shape = np.shape(self.vis)
            print shape[0 : 2]
            # warpAffine and getRectSubPix take sizes as (width, height)
            faceArray = cv2.warpAffine(self.vis, rotationMatrix,
                                       (shape[1], shape[0]),
                                       flags=cv2.INTER_LINEAR)
            faceArray2 = cv2.getRectSubPix(faceArray,
                                           (int(width), int(height)),
                                           track_box[0])

            cv2.imshow("face image", faceArray2)
            
            return faceArray2
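
The rotate-then-crop idiom at the end of Example #21, isolated and with the (width, height) argument order made explicit; a sketch of the general technique rather than this class's exact behaviour:

import cv2
import numpy as np

def crop_rotated_box(img, track_box):
    # track_box: ((cx, cy), (w, h), angle), as returned by cv2.CamShift
    (cx, cy), (w, h), angle = track_box
    rows, cols = img.shape[:2]
    M = cv2.getRotationMatrix2D((cx, cy), angle, 1.0)
    # dsize and patchSize are (width, height), not (rows, cols)
    rotated = cv2.warpAffine(img, M, (cols, rows), flags=cv2.INTER_LINEAR)
    return cv2.getRectSubPix(rotated, (int(w), int(h)), (cx, cy))
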