Example #1
    def determineMarkerQuality_naive(self, frame_org):

        phase = np.exp((self.limitAngleToRange(-self.orientation)) * 1j)

        t1_temp = self.kernelComplex * np.power(phase, self.order)
        t1 = t1_temp.real > self.threshold

        t2_temp = self.kernelComplex * np.power(phase, self.order)
        t2 = t2_temp.real < -self.threshold

        img_t1_t2_diff = t1.astype(np.float32) - t2.astype(np.float32)

        angleThreshold = 3.14 / (2 * self.order)

        t3 = np.angle(self.KernelRemoveArmComplex * phase) < angleThreshold
        t4 = np.angle(self.KernelRemoveArmComplex * phase) > -angleThreshold
        mask = 1 - 2 * (t3 & t4)

        template = (img_t1_t2_diff) * mask
        template = cv.fromarray(1 - template)

        (xm, ym) = self.lastMarkerLocation

        y1 = ym - int(math.floor(self.kernelSize / 2.0))
        y2 = ym + int(math.ceil(self.kernelSize / 2.0))

        x1 = xm - int(math.floor(self.kernelSize / 2.0))
        x2 = xm + int(math.ceil(self.kernelSize / 2.0))

        try:
            frame = frame_org[y1:y2, x1:x2]
        except (TypeError):
            self.quality = 0
            return
        w, h = cv.GetSize(frame)
        im_dst = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)
        cv.Threshold(frame, im_dst, 128, 1, cv.CV_THRESH_BINARY)

        matches = 0
        blacks = 0
        w, h = cv.GetSize(im_dst)
        for x in xrange(w):
            for y in xrange(h):
                if cv.Get2D(im_dst, y, x)[0] == 0:  # if pixel is black
                    blacks += 1
                    if cv.Get2D(im_dst, y, x)[0] == cv.Get2D(template, y,
                                                             x)[0]:
                        matches += 1
                else:
                    continue

#	self.quality = float(matches)/(w*h)
        self.quality = float(matches) / blacks if blacks else 0.0

        im_dst = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)
        cv.Threshold(frame, im_dst, 115, 255, cv.CV_THRESH_BINARY)

        cv.ShowImage("small_image", im_dst)
        cv.ShowImage("temp_kernel", template)
Example #2
    def test_divider(self):
        image = cv.LoadImage(
            os.path.abspath(os.environ['BORG'] +
                            '/Brain/data/hog_test/noface.jpg'))
        subRect = (0, 0, 250, 187)
        subimage = self.divider.crop(image, subRect)
        subimage1 = image

        result = self.divider.divide(image, 2, 2)

        if cv.GetSize(subimage) != cv.GetSize(result[0]):
            self.fail(
                "The subimage sizes do not match. Either crop the image correctly by hand or check the divider function"
            )

        dif = cv.CreateImage(cv.GetSize(subimage), cv.IPL_DEPTH_8U, 3)
        dif2 = cv.CreateImage(cv.GetSize(subimage), cv.IPL_DEPTH_8U, 3)

        cv.AbsDiff(subimage, result[0], dif)
        cv.Threshold(dif, dif2, 50, 255, cv.CV_THRESH_TOZERO)
        for i in range(3):
            cv.SetImageCOI(dif2, i + 1)
            n_nonzero = cv.CountNonZero(dif2)

        if n_nonzero < 400:
            threshold = 0
        else:
            threshold = 1
            self.assertEqual(threshold, 0, "The subimages are different")

        result = self.divider.divide(image, 4, 4, option="pro")
        print len(result)
        print result

        dif = cv.CreateImage(cv.GetSize(subimage1), cv.IPL_DEPTH_8U, 3)
        dif2 = cv.CreateImage(cv.GetSize(subimage1), cv.IPL_DEPTH_8U, 3)

        cv.AbsDiff(subimage1, result[0], dif)
        cv.Threshold(dif, dif2, 50, 255, cv.CV_THRESH_TOZERO)
        for i in range(3):
            cv.SetImageCOI(dif2, i + 1)
            n_nonzero = cv.CountNonZero(dif2)

        if n_nonzero < 400:
            threshold = 0
        else:
            threshold = 1
            self.assertEqual(threshold, 0, "The subimages are different")

        result = self.divider.divide(image, 4, 4, option="pro", overlap=10)
Example #3
    def preproc_map_img(self, map_img):
        """ Preprocesses the map image Soft, erode or whtaever it is necessary to improve the input"""
        #Apply threshold to have just black and white
        thresh_img=cv.CreateMat(map_img.height, map_img.width, cv.CV_8UC1)
        cv.Threshold(map_img, thresh_img, 250, 255, cv.CV_THRESH_BINARY)

        #Blur map's thresholded image
        soft_img=cv.CreateMat(map_img.height, map_img.width, cv.CV_8UC1)
        cv.Smooth(thresh_img, soft_img, cv.CV_GAUSSIAN, 9, 9)

        #Dilate the inverse map to get its skeleton
        dilated_img = cv.CreateMat(map_img.height, map_img.width, cv.CV_8UC1)
        cv.Dilate(soft_img, dilated_img, iterations=20)

        #Create inverse image
#        dilated_inverted_img=cv.CreateMat(map_img.height, map_img.width, cv.CV_8UC1)
#        for r in range(0,dilated_img.rows):
#            for c in range(0,dilated_img.cols):
#                dilated_inverted_img[r,c]=255-dilated_img[r,c]

        #Enhance image edges for the Hough transform
        canny_img=cv.CreateMat(map_img.height, map_img.width, cv.CV_8UC1)
        cv.Canny(soft_img, canny_img, 200,220)

        preprocessed_map = dilated_img
        return preprocessed_map
Example #4
    def post_process_distance_img(self, dist_img):
        inverted_img=cv.CreateMat(dist_img.height, dist_img.width, cv.CV_8UC1)

        #Blur image
        soft_img=cv.CreateMat(dist_img.height, dist_img.width, cv.CV_8UC1)
        cv.Smooth(dist_img, soft_img, cv.CV_GAUSSIAN, 21, 21)


        #Apply threshold to have just black and white
        thresh_img=cv.CreateMat(dist_img.height, dist_img.width, cv.CV_8UC1)
        cv.Threshold(soft_img, thresh_img, 1, 255, cv.CV_THRESH_BINARY)#CV_THRESH_OTSU is an adaptive thresholding method



        #Create inverse image (the erosion below needs it; with this loop commented out an uninitialized matrix would be eroded)
        for r in range(0,thresh_img.rows):
            for c in range(0,thresh_img.cols):
                inverted_img[r,c]=255-thresh_img[r,c]

        #Erode the inverse map to get its skeleton
        eroded_img = cv.CreateMat(dist_img.height, dist_img.width, cv.CV_8UC1)
        cv.Erode(inverted_img, eroded_img, iterations=10)

        #Create inverse image
        for r in range(0,eroded_img.rows):
            for c in range(0,eroded_img.cols):
                inverted_img[r,c]=255-eroded_img[r,c]

        return inverted_img
Example #5
def preprocessing(im):
    gray = cv.CreateImage((im.width, im.height), 8, 1)
    out = cv.CreateImage((im.width,im.height),cv.IPL_DEPTH_8U,1)
    cv.CvtColor(im,gray,cv.CV_BGR2GRAY)
    cv.Threshold(gray,out,110,255,cv.CV_THRESH_BINARY_INV)
    return out  # return the thresholded image; returning gray would discard the threshold result
Example #6
def crack(tocrack):
    im = cv.CreateImage(cv.GetSize(tocrack), 8, 1)
    cv.Split(tocrack, None, None, im, None)
    cv.Threshold(im, im, 250, 255, cv.CV_THRESH_BINARY)

    txt = pytesser.iplimage_to_string(im)
    return txt[:-2]
Example #7
    def processImage(self, curframe):
        cv.Smooth(curframe, curframe)  #Remove false positives

        if not self.absdiff_frame:  #For the first time put values in difference, temp and moving_average
            self.absdiff_frame = cv.CloneImage(curframe)
            self.previous_frame = cv.CloneImage(curframe)
            cv.Convert(
                curframe, self.average_frame
            )  #Convert because cv.RunningAvg works on 32F images
        else:
            cv.RunningAvg(curframe, self.average_frame,
                          0.05)  #Compute the average

        cv.Convert(self.average_frame,
                   self.previous_frame)  #Convert back to 8U frame

        cv.AbsDiff(curframe, self.previous_frame,
                   self.absdiff_frame)  # moving_average - curframe

        cv.CvtColor(
            self.absdiff_frame, self.gray_frame,
            cv.CV_RGB2GRAY)  #Convert to grayscale, otherwise the threshold cannot be applied
        cv.Threshold(self.gray_frame, self.gray_frame, 50, 255,
                     cv.CV_THRESH_BINARY)

        cv.Dilate(self.gray_frame, self.gray_frame, None,
                  15)  #to get object blobs
        cv.Erode(self.gray_frame, self.gray_frame, None, 10)
Example #8
def binary_image(image, min_level, max_level):
    new_image = cv.CreateImage((image.width, image.height), image.depth,
                               image.nChannels)
    minVal, maxVal, minLoc, maxLoc = cv.MinMaxLoc(image)
    threshold = minVal * min_level + maxVal * max_level
    cv.Threshold(image, new_image, threshold, 255, cv.CV_THRESH_BINARY)
    return new_image
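
A minimal usage sketch for binary_image(), assuming a hypothetical grayscale input file test.png and weights that put the threshold halfway between the darkest and brightest pixel:

import cv2.cv as cv

img = cv.LoadImage("test.png", cv.CV_LOAD_IMAGE_GRAYSCALE)  # hypothetical input
binary = binary_image(img, 0.5, 0.5)  # threshold = 0.5*min + 0.5*max
cv.ShowImage("binary", binary)
cv.WaitKey(0)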
Example #9
def difference_image(img1, img2):
    print " simg1 = simplify(img1)"
    simg1 = simplify(img1)
    print " simg2 = simplify(img2)"
    simg2 = simplify(img2)

    #dbg_image('simg1',simg1)
    #dbg_image('simg2',simg2)

    #create image buffers
    img3 = cv.CreateImage(cv.GetSize(img2), cv.IPL_DEPTH_8U, 1)
    simg3 = cv.CloneImage(img3)
    bitimage = cv.CreateImage(cv.GetSize(img2), cv.IPL_DEPTH_8U, 1)
    eimg3 = cv.CloneImage(bitimage)

    #process
    print " cv.AbsDiff(simg2,simg1,img3)"
    cv.AbsDiff(simg2, simg1, img3)
    print " cv.Smooth(img3,simg3)"
    cv.Smooth(img3, simg3)
    #dbg_image('simg3',simg3)
    # these threshold values must be calibrated
    #cv.Threshold(simg3,bitimage,16,255,cv.CV_THRESH_TOZERO_INV)
    print " cv.Threshold(simg3,bitimage,16,255,cv.CV_THRESH_BINARY)"
    cv.Threshold(simg3, bitimage, 50, 255, cv.CV_THRESH_BINARY)
    #dbg_image('bitimage',bitimage)
    print " cv.Erode(bitimage,eimg3)"
    cv.Erode(bitimage, eimg3)
    #dbg_image('eimg3',eimg3)
    return eimg3
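
The simplify() helper called above is not part of this snippet. A plausible sketch, assuming it only reduces a colour frame to a smoothed single-channel image (the blur kernel size is an assumption; cv2.cv is imported as cv, as in the snippets above):

def simplify(img):
    # hypothetical helper: grayscale conversion followed by a light blur
    gray = cv.CreateImage(cv.GetSize(img), cv.IPL_DEPTH_8U, 1)
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)
    cv.Smooth(gray, gray, cv.CV_GAUSSIAN, 5, 5)
    return gray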
Example #10
def update_mhi(img, dst, diff_threshold):
    global last
    global mhi
    global storage
    global mask
    global orient
    global segmask
    timestamp = time.clock() / CLOCKS_PER_SEC # get current time in seconds
    size = cv.GetSize(img) # get current frame size
    idx1 = last
    if not mhi or cv.GetSize(mhi) != size:
        for i in range(N):
            buf[i] = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)
            cv.Zero(buf[i])
        mhi = cv.CreateImage(size,cv. IPL_DEPTH_32F, 1)
        cv.Zero(mhi) # clear MHI at the beginning
        orient = cv.CreateImage(size,cv. IPL_DEPTH_32F, 1)
        segmask = cv.CreateImage(size,cv. IPL_DEPTH_32F, 1)
        mask = cv.CreateImage(size,cv. IPL_DEPTH_8U, 1)

    cv.CvtColor(img, buf[last], cv.CV_BGR2GRAY) # convert frame to grayscale
    idx2 = (last + 1) % N # index of (last - (N-1))th frame
    last = idx2
    silh = buf[idx2]
    cv.AbsDiff(buf[idx1], buf[idx2], silh) # get difference between frames
    cv.Threshold(silh, silh, diff_threshold, 1, cv.CV_THRESH_BINARY) # and threshold it
    cv.UpdateMotionHistory(silh, mhi, timestamp, MHI_DURATION) # update MHI
    cv.CvtScale(mhi, mask, 255./MHI_DURATION,
                (MHI_DURATION - timestamp)*255./MHI_DURATION)
    cv.Zero(dst)
    cv.Merge(mask, None, None, None, dst)
    cv.CalcMotionGradient(mhi, mask, orient, MAX_TIME_DELTA, MIN_TIME_DELTA, 3)
    if not storage:
        storage = cv.CreateMemStorage(0)
    seq = cv.SegmentMotion(mhi, segmask, storage, timestamp, MAX_TIME_DELTA)
    for (area, value, comp_rect) in seq:
        if comp_rect[2] + comp_rect[3] > 100: # reject very small components
            color = cv.CV_RGB(255, 0,0)
            silh_roi = cv.GetSubRect(silh, comp_rect)
            mhi_roi = cv.GetSubRect(mhi, comp_rect)
            orient_roi = cv.GetSubRect(orient, comp_rect)
            mask_roi = cv.GetSubRect(mask, comp_rect)
            angle = 360 - cv.CalcGlobalOrientation(orient_roi, mask_roi, mhi_roi, timestamp, MHI_DURATION)

            count = cv.Norm(silh_roi, None, cv.CV_L1, None) # calculate number of points within silhouette ROI
            if count < (comp_rect[2] * comp_rect[3] * 0.05):
                continue

            magnitude = 30.
            center = ((comp_rect[0] + comp_rect[2] / 2), (comp_rect[1] + comp_rect[3] / 2))
            cv.Circle(dst, center, cv.Round(magnitude*1.2), color, 3, cv.CV_AA, 0)
            cv.Line(dst,
                    center,
                    (cv.Round(center[0] + magnitude * cos(angle * cv.CV_PI / 180)),
                     cv.Round(center[1] - magnitude * sin(angle * cv.CV_PI / 180))),
                    color,
                    3,
                    cv.CV_AA,
                    0)
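
update_mhi() relies on several module-level globals that are not shown in this snippet. A sketch of how they might be initialized, using values in the spirit of the classic OpenCV motion-template sample (treat the constants as assumptions):

import cv2.cv as cv

CLOCKS_PER_SEC = 1.0
MHI_DURATION = 1          # how long a motion trace stays in the MHI, in seconds
MAX_TIME_DELTA = 0.5
MIN_TIME_DELTA = 0.05
N = 4                     # number of cyclic frame buffers
buf = [None] * N          # ring buffer of grayscale frames
last = 0
mhi = None                # motion history image
orient = None             # orientation image
mask = None               # valid orientation mask
segmask = None            # motion segmentation map
storage = None            # temporary memory storage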
Example #11
def lines2():
    im = cv.LoadImage('roi_edges.jpg', cv.CV_LOAD_IMAGE_GRAYSCALE)
    pi = math.pi
    x = 0
    dst = cv.CreateImage(cv.GetSize(im), 8, 1)
    cv.Canny(im, dst, 200, 200)
    cv.Threshold(dst, dst, 100, 255, cv.CV_THRESH_BINARY)
    color_dst_standard = cv.CreateImage(cv.GetSize(im), 8, 3)
    cv.CvtColor(im, color_dst_standard,
                cv.CV_GRAY2BGR)  #Create output image in RGB to put red lines
    lines = cv.HoughLines2(dst, cv.CreateMemStorage(0), cv.CV_HOUGH_STANDARD,
                           1, pi / 100, 71, 0, 0)
    klsum = 0
    klaver = 0
    krsum = 0
    kraver = 0

    #global k
    #k=0
    for (rho, theta) in lines[:100]:
        kl = []
        kr = []
        a = math.cos(theta)
        b = math.sin(theta)
        x0 = a * rho
        y0 = b * rho
        pt1 = (cv.Round(x0 + 1000 * (-b)), cv.Round(y0 + 1000 * (a)))
        pt2 = (cv.Round(x0 - 1000 * (-b)), cv.Round(y0 - 1000 * (a)))
        k = ((y0 - 1000 * (a)) - (y0 + 1000 * (a))) / ((x0 - 1000 * (-b)) -
                                                       (x0 + 1000 * (-b)))

        if abs(k) < 0.4:
            pass
        elif k > 0:
            kr.append(k)
            len_kr = len(kr)
            for i in kr:
                krsum = krsum + i
                kraver = krsum / len_kr

                cv.Line(color_dst_standard, pt1, pt2, cv.CV_RGB(255, 0, 0), 2,
                        4)
        elif k < 0:
            kl.append(k)
            kl.append(k)
            len_kl = len(kl)
            for i in kl:
                klsum = klsum + i
                klaver = klsum / len_kl
                cv.Line(color_dst_standard, pt1, pt2, cv.CV_RGB(255, 0, 0), 2,
                        4)
        #print k
    #  cv.Line(color_dst_standard, pt1, pt2, cv.CV_RGB(255, 0, 0), 2, 4)
    cv.SaveImage('lane.jpg', color_dst_standard)
    print 'Average slope of the left lane:', klaver, '  Average slope of the right lane:', kraver
    cv.ShowImage("Hough Standard", color_dst_standard)
    cv.WaitKey(0)
Example #12
    def processImage(self, frame):
        cv.CvtColor(frame, self.frame2gray, cv.CV_RGB2GRAY)

        #Absdiff to get the difference between the two frames
        cv.AbsDiff(self.frame1gray, self.frame2gray, self.res)

        #Remove the noise and do the threshold
        cv.Smooth(self.res, self.res, cv.CV_BLUR, 5, 5)
        cv.MorphologyEx(self.res, self.res, None, None, cv.CV_MOP_OPEN)
        cv.MorphologyEx(self.res, self.res, None, None, cv.CV_MOP_CLOSE)
        cv.Threshold(self.res, self.res, 10, 255, cv.CV_THRESH_BINARY_INV)
Example #13
def find_squares4(color_img):
    """
    Finds multiple squares in image

    Steps:
    -Use Canny edge to highlight contours, and dilation to connect
    the edge segments.
    -Threshold the result to binary edge tokens
    -Use cv.FindContours: returns a cv.CvSequence of cv.CvContours
    -Filter each candidate: use Approx poly, keep only contours with 4 vertices,
    enough area, and ~90deg angles.

    Return all squares contours in one flat list of arrays, 4 x,y points each.
    """
    #select even sizes only
    width, height = (color_img.width & -2, color_img.height & -2)
    timg = cv.CloneImage(color_img)  # make a copy of input image
    gray = cv.CreateImage((width, height), 8, 1)

    # select the maximum ROI in the image
    cv.SetImageROI(timg, (0, 0, width, height))

    # down-scale and upscale the image to filter out the noise
    pyr = cv.CreateImage((width / 2, height / 2), 8, 3)
    cv.PyrDown(timg, pyr, 7)
    cv.PyrUp(pyr, timg, 7)

    tgray = cv.CreateImage((width, height), 8, 1)
    squares = []

    # Find squares in every color plane of the image
    # Two methods, we use both:
    # 1. Canny to catch squares with gradient shading. Use upper threshold
    # from slider, set the lower to 0 (which forces edges merging). Then
    # dilate canny output to remove potential holes between edge segments.
    # 2. Binary thresholding at multiple levels
    N = 11
    for c in [0, 1, 2]:
        #extract the c-th color plane
        cv.SetImageCOI(timg, c + 1)
        cv.Copy(timg, tgray, None)
        cv.Canny(tgray, gray, 0, 50, 5)
        cv.Dilate(gray, gray)
        squares = squares + find_squares_from_binary(gray)

        # Look for more squares at several threshold levels
        for l in range(1, N):
            cv.Threshold(tgray, gray, (l + 1) * 255 / N, 255,
                         cv.CV_THRESH_BINARY)
            squares = squares + find_squares_from_binary(gray)

    return squares
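
find_squares_from_binary() is not included in this example. A minimal sketch of what it could look like, following the steps listed in the docstring (contours, polygon approximation, keep 4-vertex convex candidates with enough area); the ~90° angle test is omitted here and the area/accuracy constants are assumptions:

def find_squares_from_binary(gray):
    # hypothetical helper: extract square-like contours from a binary edge image
    squares = []
    storage = cv.CreateMemStorage(0)
    contours = cv.FindContours(gray, storage, cv.CV_RETR_LIST,
                               cv.CV_CHAIN_APPROX_SIMPLE, (0, 0))
    storage = cv.CreateMemStorage(0)
    while contours:
        # approximation accuracy proportional to the contour perimeter
        arclength = cv.ArcLength(contours)
        polygon = cv.ApproxPoly(contours, storage, cv.CV_POLY_APPROX_DP,
                                arclength * 0.02, 0)
        if (len(polygon) == 4 and abs(cv.ContourArea(polygon)) > 1000
                and cv.CheckContourConvexity(polygon)):
            squares.append([p for p in polygon])
        contours = contours.h_next()
    return squares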
Example #14
	def process_image(self):
		"""Process the image from the camera in order to remove noise."""
		
		cv.CvtColor(self.colorFrame, self.currentGrayFrame, cv.CV_RGB2GRAY)
		
		# remove noise, etc.
		self.currentGrayFrame = self.reduce_image_noise(self.currentGrayFrame)
		
		# find the difference between the current and previous frame
		cv.AbsDiff(self.currentGrayFrame, self.previousGrayFrame, self.resultImage)
		
		# calculate the binary image that shows where there is change in the image
		cv.Threshold(self.resultImage, self.resultImage, 10, 255, cv.CV_THRESH_BINARY_INV)
		
		cv.Copy(self.currentGrayFrame, self.previousGrayFrame)
Example #15
    def process_image(self, slider_pos):
        """
        This function finds contours, draws them and their approximation by ellipses.
        """
        stor = cv.CreateMemStorage()

        # Create the destination images
        image02 = cv.CloneImage(self.source_image)
        cv.Zero(image02)
        image04 = cv.CreateImage(cv.GetSize(self.source_image),
                                 cv.IPL_DEPTH_8U, 3)
        cv.Zero(image04)

        # Threshold the source image. This is needed for cv.FindContours().
        cv.Threshold(self.source_image, image02, slider_pos, 255,
                     cv.CV_THRESH_BINARY)

        # Find all contours.
        cont = cv.FindContours(image02, stor, cv.CV_RETR_LIST,
                               cv.CV_CHAIN_APPROX_NONE, (0, 0))

        for c in contour_iterator(cont):
            # Number of points must be more than or equal to 6 for cv.FitEllipse2
            if len(c) >= 6:
                # Copy the contour into an array of (x,y)s
                PointArray2D32f = cv.CreateMat(1, len(c), cv.CV_32FC2)
                for (i, (x, y)) in enumerate(c):
                    PointArray2D32f[0, i] = (x, y)

                # Draw the current contour in gray
                gray = cv.CV_RGB(100, 100, 100)
                cv.DrawContours(image04, c, gray, gray, 0, 1, 8, (0, 0))

                # Fits ellipse to current contour.
                (center, size, angle) = cv.FitEllipse2(PointArray2D32f)

                # Convert ellipse data from float to integer representation.
                center = (cv.Round(center[0]), cv.Round(center[1]))
                size = (cv.Round(size[0] * 0.5), cv.Round(size[1] * 0.5))

                # Draw ellipse in random color
                color = cv.CV_RGB(random.randrange(256), random.randrange(256),
                                  random.randrange(256))
                cv.Ellipse(image04, center, size, angle, 0, 360, color, 2,
                           cv.CV_AA, 0)

        # Show image. HighGUI use.
        cv.ShowImage("Result", image04)
Example #16
def do_loop(self):
    # image processing
    if self.config.threshold:
        cv.Threshold(self.img_original, self.img_target,
                     self.config.pix_thresh_min, 0xff, cv.CV_THRESH_BINARY)
        cv.And(self.img_target, self.img_mask, self.img_target)
    if self.config.dilate:
        cv.Dilate(self.img_target,
                  self.img_target,
                  iterations=self.config.dilate)
    if self.config.erode:
        cv.Erode(self.img_target,
                 self.img_target,
                 iterations=self.config.erode)
    show_image(self)

    sys.stdout.write('> ')
    sys.stdout.flush()
    # keystroke processing
    ki = cv.WaitKey(0)

    # Simple character value, if applicable
    kc = None
    # Char if a common char, otherwise the integer code
    k = ki

    if 0 <= ki < 256:
        kc = chr(ki)
        k = kc
    elif 65506 < ki < 66000 and ki != 65535:
        ki2 = ki - 65506 - 30
        # modifier keys
        if ki2 >= 0:
            kc = chr(ki2)
            k = kc

    if kc:
        print '%d (%s)\n' % (ki, kc)
    else:
        print '%d\n' % ki

    if ki > 66000:
        return
    if ki < 0:
        print "Exiting on closed window"
        self.running = False
        return
    on_key(self, k)
Example #17
def on_trackbar(edge_thresh):

    cv.Threshold(gray, edge, float(edge_thresh), float(edge_thresh),
                 cv.CV_THRESH_BINARY)
    #Distance transform
    cv.DistTransform(edge, dist, cv.CV_DIST_L2, cv.CV_DIST_MASK_5)

    cv.ConvertScale(dist, dist, 5000.0, 0)
    cv.Pow(dist, dist, 0.5)

    cv.ConvertScale(dist, dist32s, 1.0, 0.5)
    cv.AndS(dist32s, cv.ScalarAll(255), dist32s, None)
    cv.ConvertScale(dist32s, dist8u1, 1, 0)
    cv.ConvertScale(dist32s, dist32s, -1, 0)
    cv.AddS(dist32s, cv.ScalarAll(255), dist32s, None)
    cv.ConvertScale(dist32s, dist8u2, 1, 0)
    cv.Merge(dist8u1, dist8u2, dist8u2, None, dist8u)
    cv.ShowImage(wndname, dist8u)
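
The buffers and the window used by on_trackbar() are module-level globals created elsewhere. A sketch of a setup that matches the calls above (the file name, depths and trackbar range are assumptions):

import cv2.cv as cv

wndname = "Distance transform"
gray = cv.LoadImage("input.png", cv.CV_LOAD_IMAGE_GRAYSCALE)  # hypothetical input
edge = cv.CloneImage(gray)
dist = cv.CreateImage(cv.GetSize(gray), cv.IPL_DEPTH_32F, 1)   # float distance map
dist32s = cv.CreateImage(cv.GetSize(gray), cv.IPL_DEPTH_32S, 1)
dist8u1 = cv.CloneImage(gray)
dist8u2 = cv.CloneImage(gray)
dist8u = cv.CreateImage(cv.GetSize(gray), 8, 3)                # 3-channel display image

cv.NamedWindow(wndname, 1)
cv.CreateTrackbar("Threshold", wndname, 100, 255, on_trackbar)
on_trackbar(100)
cv.WaitKey(0)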
Example #18
def on_mouse(event, x, y, flags, param):

    if (not color_img):
        return

    if event == cv.CV_EVENT_LBUTTONDOWN:
        my_mask = None
        seed = (x, y)
        if ffill_case == 0:
            lo = up = 0
            flags = connectivity + (new_mask_val << 8)
        else:
            lo = lo_diff
            up = up_diff
            flags = connectivity + (
                new_mask_val << 8) + cv.CV_FLOODFILL_FIXED_RANGE
        b = random.randint(0, 255)
        g = random.randint(0, 255)
        r = random.randint(0, 255)

        if (is_mask):
            my_mask = mask
            cv.Threshold(mask, mask, 1, 128, cv.CV_THRESH_BINARY)

        if (is_color):

            color = cv.CV_RGB(r, g, b)
            comp = cv.FloodFill(color_img, seed, color, cv.CV_RGB(lo, lo, lo),
                                cv.CV_RGB(up, up, up), flags, my_mask)
            cv.ShowImage("image", color_img)

        else:

            brightness = cv.RealScalar((r * 2 + g * 7 + b + 5) / 10)
            comp = cv.FloodFill(gray_img, seed, brightness, cv.RealScalar(lo),
                                cv.RealScalar(up), flags, my_mask)
            cv.ShowImage("image", gray_img)

        print "%g pixels were repainted" % comp[0]

        if (is_mask):
            cv.ShowImage("mask", mask)
Example #19
    def motionDetect(self, img):
        cv.Smooth(img, img, cv.CV_GAUSSIAN, 3, 0)

        cv.RunningAvg(img, self.movingAvg, 0.020, None)
        cv.ConvertScale(self.movingAvg, self.tmp, 1.0, 0.0)
        cv.AbsDiff(img, self.tmp, self.diff)
        cv.CvtColor(self.diff, self.grayImage, cv.CV_RGB2GRAY)
        cv.Threshold(self.grayImage, self.grayImage, 70,255, cv.CV_THRESH_BINARY)
        cv.Dilate(self.grayImage, self.grayImage, None, 18)#18   
        cv.Erode(self.grayImage, self.grayImage, None, 10)#10
        storage = cv.CreateMemStorage(0)
        contour = cv.FindContours(self.grayImage, storage, cv.CV_RETR_CCOMP, cv.CV_CHAIN_APPROX_SIMPLE)
#        points = []                                                                                      
        while contour:
            boundRect = cv.BoundingRect(list(contour))
            contour = contour.h_next()
            pt1 = (boundRect[0], boundRect[1])
            pt2 = (boundRect[0] + boundRect[2], boundRect[1] + boundRect[3])
            cv.Rectangle(img, pt1, pt2, cv.CV_RGB(255,255,0), 1)

        return img
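
motionDetect() assumes the image buffers below were created earlier (e.g. in the class's __init__). A hypothetical setup method, with depths inferred from the calls above:

    def setupBuffers(self, frame):
        # hypothetical one-time setup, given a first captured frame
        self.movingAvg = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_32F, 3)  # cv.RunningAvg needs a 32F accumulator
        self.tmp = cv.CloneImage(frame)                                          # 8U copy used by cv.AbsDiff
        self.diff = cv.CloneImage(frame)
        self.grayImage = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)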
Example #20
    def processaImagem(self):
        """
        Crio uma imagem cinza a partir da atual para o programa ficar mais rapido, crio uma imagem com a
        diferenca da imagem anterior e a imagem atual, e binarizo a imagem cinza para filtrar pixels pequenos.
        """
        # Remove os falsos positivos.
        cv.Smooth(self.imagem_atual, self.imagem_atual)

        # Aqui eu coloco um tempo de execucao entre as imagens.
        cv.RunningAvg(self.imagem_atual, self.imagem_auxiliar, 0.05)

        # Covertendo de volta a imagem para poder trabalhar.
        cv.Convert(self.imagem_auxiliar, self.imagem_anterior)

        # Cria uma nova imagem com a diferenca entre a imagem anterior e a atual.
        cv.AbsDiff(self.imagem_atual, self.imagem_anterior, self.imagem_diferenca)

        # Converte a imagem atual em escala de cinza.
        cv.CvtColor(self.imagem_diferenca, self.imagem_cinza, cv.CV_RGB2GRAY)

        # Binariza a imagem. Para poder filtrar pixels pequenos.
        cv.Threshold(self.imagem_cinza, self.imagem_cinza, 50, 255, cv.CV_THRESH_BINARY)
Example #21
    def process_image(self, slider_pos):
        global cimg, source_image1, ellipse_size, maxf, maxs, eoc, lastcx, lastcy, lastr
        """
        This function finds contours, draws them and their approximation by ellipses.
        """
        stor = cv.CreateMemStorage()

        # Create the destination images
        cimg = cv.CloneImage(self.source_image)
        cv.Zero(cimg)
        image02 = cv.CloneImage(self.source_image)
        cv.Zero(image02)
        image04 = cv.CreateImage(cv.GetSize(self.source_image),
                                 cv.IPL_DEPTH_8U, 3)
        cv.Zero(image04)

        # Threshold the source image. This is needed for cv.FindContours().
        cv.Threshold(self.source_image, image02, slider_pos, 255,
                     cv.CV_THRESH_BINARY)

        # Find all contours.
        cont = cv.FindContours(image02, stor, cv.CV_RETR_LIST,
                               cv.CV_CHAIN_APPROX_NONE, (0, 0))

        maxf = 0
        maxs = 0
        size1 = 0

        for c in contour_iterator(cont):
            if len(c) > ellipse_size:
                PointArray2D32f = cv.CreateMat(1, len(c), cv.CV_32FC2)
                for (i, (x, y)) in enumerate(c):
                    PointArray2D32f[0, i] = (x, y)

                # Draw the current contour in gray
                gray = cv.CV_RGB(100, 100, 100)
                cv.DrawContours(image04, c, gray, gray, 0, 1, 8, (0, 0))

                if iter == 0:
                    strng = segF + '/' + 'contour1.png'
                    cv.SaveImage(strng, image04)
                color = (255, 255, 255)

                (center, size, angle) = cv.FitEllipse2(PointArray2D32f)

                # Convert ellipse data from float to integer representation.
                center = (cv.Round(center[0]), cv.Round(center[1]))
                size = (cv.Round(size[0] * 0.5), cv.Round(size[1] * 0.5))

                if iter == 1:
                    if size[0] > size[1]:
                        size2 = size[0]
                    else:
                        size2 = size[1]

                    if size2 > size1:
                        size1 = size2
                        size3 = size

                # Fits ellipse to current contour.
                if eoc == 0 and iter == 2:
                    rand_val = abs((lastr - ((size[0] + size[1]) / 2)))
                    if rand_val > 20 and float(max(size[0], size[1])) / float(
                            min(size[0], size[1])) < 1.5:
                        lastcx = center[0]
                        lastcy = center[1]
                        lastr = (size[0] + size[1]) / 2

                    if rand_val > 20 and float(max(size[0], size[1])) / float(
                            min(size[0], size[1])) < 1.4:
                        cv.Ellipse(cimg, center, size, angle, 0, 360, color, 2,
                                   cv.CV_AA, 0)
                        cv.Ellipse(source_image1, center, size, angle, 0, 360,
                                   color, 2, cv.CV_AA, 0)

                elif eoc == 1 and iter == 2:
                    (int, cntr, rad) = cv.MinEnclosingCircle(PointArray2D32f)
                    cntr = (cv.Round(cntr[0]), cv.Round(cntr[1]))
                    rad = (cv.Round(rad))
                    if maxf == 0 and maxs == 0:
                        cv.Circle(cimg, cntr, rad, color, 1, cv.CV_AA, shift=0)
                        cv.Circle(source_image1,
                                  cntr,
                                  rad,
                                  color,
                                  2,
                                  cv.CV_AA,
                                  shift=0)
                        maxf = rad
                    elif (maxf > 0 and maxs == 0) and abs(rad - maxf) > 30:
                        cv.Circle(cimg, cntr, rad, color, 2, cv.CV_AA, shift=0)
                        cv.Circle(source_image1,
                                  cntr,
                                  rad,
                                  color,
                                  2,
                                  cv.CV_AA,
                                  shift=0)
                        maxs = len(c)
        if iter == 1:
            temp3 = 2 * abs(size3[1] - size3[0])
            if (temp3 > 40):
                eoc = 1
Example #22
cv.CornerHarris(im, dst_32f, neighbourhood, aperture, k)

minv, maxv, minl, maxl = cv.MinMaxLoc(dst_32f)

dilated = cv.CloneImage(dst_32f)
cv.Dilate(
    dst_32f, dilated
)  # This way pixels that are local maxima keep their value, while all others are raised to the neighbourhood maximum

localMax = cv.CreateMat(dst_32f.height, dst_32f.width, cv.CV_8U)
cv.Cmp(
    dst_32f, dilated, localMax, cv.CV_CMP_EQ
)  # The comparison keeps only the unmodified pixels, i.e. the local maxima, which are the corner candidates

threshold = 0.01 * maxv
cv.Threshold(dst_32f, dst_32f, threshold, 255, cv.CV_THRESH_BINARY)

cornerMap = cv.CreateMat(dst_32f.height, dst_32f.width, cv.CV_8U)
cv.Convert(dst_32f, cornerMap)  #Convert to 8-bit so the And below can be applied
cv.And(cornerMap, localMax, cornerMap)  #Discard every pixel that is not a local maximum

radius = 3
thickness = 2

l = []
for x in range(
        cornerMap.height
):  #Build the list of corner points: every pixel that is not 0 (i.e. not black)
    for y in range(cornerMap.width):
        if cornerMap[x, y]:
            l.append((y, x))
Example #23
h = frame2gray.height
nb_pixels = frame2gray.width * frame2gray.height

while True:
    frame2 = cv.QueryFrame(capture)
    cv.CvtColor(frame2, frame2gray, cv.CV_RGB2GRAY)

    cv.AbsDiff(frame1gray, frame2gray, res)
    cv.ShowImage("After AbsDiff", res)

    cv.Smooth(res, res, cv.CV_BLUR, 5, 5)
    element = cv.CreateStructuringElementEx(5 * 2 + 1, 5 * 2 + 1, 5, 5,
                                            cv.CV_SHAPE_RECT)
    cv.MorphologyEx(res, res, None, None, cv.CV_MOP_OPEN)
    cv.MorphologyEx(res, res, None, None, cv.CV_MOP_CLOSE)
    cv.Threshold(res, res, 10, 255, cv.CV_THRESH_BINARY_INV)

    cv.ShowImage("Image", frame2)
    cv.ShowImage("Res", res)

    # -----------
    nb = 0
    for y in range(h):
        for x in range(w):
            if res[y, x] == 0.0:
                nb += 1
    avg = (nb * 100.0) / nb_pixels
    # print "Average: ",avg, "%\r",
    if avg >= 5:
        print "Something is moving !"
Example #24
    def run(self):
        # Capture first frame to get size
        frame = cv.QueryFrame(self.capture)
        frame_size = cv.GetSize(frame)

        width = frame.width
        height = frame.height
        surface = width * height  # Surface area of the image
        cursurface = 0  # Holds the total contour area that has changed

        grey_image = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)
        moving_average = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_32F, 3)
        difference = None

        while True:
            color_image = cv.QueryFrame(self.capture)

            cv.Smooth(color_image, color_image, cv.CV_GAUSSIAN, 3,
                      0)  # Remove false positives

            if not difference:  # For the first time put values in difference, temp and moving_average
                difference = cv.CloneImage(color_image)
                temp = cv.CloneImage(color_image)
                cv.ConvertScale(color_image, moving_average, 1.0, 0.0)
            else:
                cv.RunningAvg(color_image, moving_average, 0.020,
                              None)  # Compute the average

            # Convert the scale of the moving average.
            cv.ConvertScale(moving_average, temp, 1.0, 0.0)

            # Minus the current frame from the moving average.
            cv.AbsDiff(color_image, temp, difference)

            # Convert the image so that it can be thresholded
            cv.CvtColor(difference, grey_image, cv.CV_RGB2GRAY)
            cv.Threshold(grey_image, grey_image, 70, 255, cv.CV_THRESH_BINARY)

            cv.Dilate(grey_image, grey_image, None, 18)  # to get object blobs
            cv.Erode(grey_image, grey_image, None, 10)

            # Find contours
            storage = cv.CreateMemStorage(0)
            contours = cv.FindContours(grey_image, storage,
                                       cv.CV_RETR_EXTERNAL,
                                       cv.CV_CHAIN_APPROX_SIMPLE)

            backcontours = contours  # Save contours

            while contours:  # For all contours compute the area
                cursurface += cv.ContourArea(contours)
                contours = contours.h_next()

            avg = (
                cursurface * 100
            ) / surface  # Calculate the average of contour area on the total size
            if avg > self.ceil:
                print("Something is moving !")
                ring = IntrusionAlarm()
                ring.run()

            # print avg,"%"
            cursurface = 0  # Put back the current surface to 0

            # Draw the contours on the image
            _red = (0, 0, 255)  # Red for external contours
            _green = (0, 255, 0)  # Green for internal contours
            levels = 1  # 1 = contours drawn, 2 = internal contours as well, 3 ...
            cv.DrawContours(color_image, backcontours, _red, _green, levels, 2,
                            cv.CV_FILLED)

            cv.ShowImage("Virtual Eye", color_image)

            # Listen for ESC or ENTER key
            c = cv.WaitKey(7) % 0x100
            if c == 27 or c == 10:
                break
            elif c == 99:
                cv2.destroyWindow('Warning!!!')
Example #25
    def run(self):
        # Capture first frame to get size
        frame = cv.QueryFrame(self.capture)
        #nframes =+ 1

        frame_size = cv.GetSize(frame)
        color_image = cv.CreateImage(cv.GetSize(frame), 8, 3)
        grey_image = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)
        moving_average = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_32F, 3)

        def totuple(a):
            try:
                return tuple(totuple(i) for i in a)
            except TypeError:
                return a

        first = True

        while True:
            closest_to_left = cv.GetSize(frame)[0]
            closest_to_right = cv.GetSize(frame)[1]

            color_image = cv.QueryFrame(self.capture)

            # Smooth to get rid of false positives
            cv.Smooth(color_image, color_image, cv.CV_GAUSSIAN, 3, 0)

            if first:
                difference = cv.CloneImage(color_image)
                temp = cv.CloneImage(color_image)
                cv.ConvertScale(color_image, moving_average, 1.0, 0.0)
                first = False
            else:
                cv.RunningAvg(color_image, moving_average, .1, None)
                cv.ShowImage("BG", moving_average)

            # Convert the scale of the moving average.
            cv.ConvertScale(moving_average, temp, 1, 0.0)

            # Minus the current frame from the moving average.
            cv.AbsDiff(color_image, temp, difference)
            #cv.ShowImage("BG",difference)

            # Convert the image to grayscale.
            cv.CvtColor(difference, grey_image, cv.CV_RGB2GRAY)
            cv.ShowImage("BG1", grey_image)

            # Convert the image to black and white.
            cv.Threshold(grey_image, grey_image, 40, 255, cv.CV_THRESH_BINARY)
            #cv.ShowImage("BG2", grey_image)

            # Dilate and erode to get people blobs
            cv.Dilate(grey_image, grey_image, None, 8)
            cv.Erode(grey_image, grey_image, None, 3)
            cv.ShowImage("BG3", grey_image)

            storage = cv.CreateMemStorage(0)
            global contour
            contour = cv.FindContours(grey_image, storage, cv.CV_RETR_CCOMP,
                                      cv.CV_CHAIN_APPROX_SIMPLE)

            points = []

            while contour:
                global bound_rect
                bound_rect = cv.BoundingRect(list(contour))
                polygon_points = cv.ApproxPoly(list(contour), storage,
                                               cv.CV_POLY_APPROX_DP)
                contour = contour.h_next()

                global pt1, pt2
                pt1 = (bound_rect[0], bound_rect[1])
                pt2 = (bound_rect[0] + bound_rect[2],
                       bound_rect[1] + bound_rect[3])

                #size control
                if (bound_rect[0] - bound_rect[2] >
                        10) and (bound_rect[1] - bound_rect[3] > 10):

                    points.append(pt1)
                    points.append(pt2)

                    #points += list(polygon_points)
                    global box, box2, box3, box4, box5
                    box = cv.MinAreaRect2(polygon_points)
                    box2 = cv.BoxPoints(box)
                    box3 = np.int0(np.around(box2))
                    box4 = totuple(box3)
                    box5 = box4 + (box4[0], )

                    cv.FillPoly(grey_image, [
                        list(polygon_points),
                    ], cv.CV_RGB(255, 255, 255), 0, 0)
                    cv.PolyLine(color_image, [
                        polygon_points,
                    ], 0, cv.CV_RGB(255, 255, 255), 1, 0, 0)
                    cv.PolyLine(color_image, [list(box5)], 0, (0, 0, 255), 2)
                    #cv.Rectangle(color_image, pt1, pt2, cv.CV_RGB(255,0,0), 1)

                    if len(points):
                        #center_point = reduce(lambda a, b: ((a[0] + b[0]) / 2, (a[1] + b[1]) / 2), points)
                        center1 = (pt1[0] + pt2[0]) / 2
                        center2 = (pt1[1] + pt2[1]) / 2
                        #print center1, center2, center_point
                        #cv.Circle(color_image, center_point, 40, cv.CV_RGB(255, 255, 255), 1)
                        #cv.Circle(color_image, center_point, 30, cv.CV_RGB(255, 100, 0), 1)
                        #cv.Circle(color_image, center_point, 20, cv.CV_RGB(255, 255, 255), 1)
                        cv.Circle(color_image, (center1, center2), 5,
                                  cv.CV_RGB(0, 0, 255), -1)

            cv.ShowImage("Target", color_image)

            # Listen for ESC key
            c = cv.WaitKey(7) % 0x100
            if c == 27:
                #cv.DestroyAllWindows()
                break
Example #26
    def store_proba(self, proba):
        # print "Got Image"
        if not self.info:
            return
        # print "Processing"
        self.timestamp = proba.header.stamp
        I = self.br.imgmsg_to_cv(proba, "8UC1")
        self.proba = cv.CloneMat(I)
        cv.Threshold(I, self.proba, 0xFE, 0xFE, cv.CV_THRESH_TRUNC)
        try:
            # (trans,rot) = self.listener.lookupTransform(proba.header.frame_id, '/world', proba.header.stamp)
            self.listener.waitForTransform(proba.header.frame_id,
                                           self.target_frame,
                                           proba.header.stamp,
                                           rospy.Duration(1.0))
            trans = numpy.mat(
                self.listener.asMatrix(self.target_frame, proba.header))
            # print "Transformation"
            # print trans
            dstdir = [trans * v for v in self.dirpts3d]
            # print "Destination dir"
            # print dstdir
            origin = trans * self.origin
            origin = origin / origin[3, 0]
            # origin = numpy.matrix([0.0, 0.0, origin[2,0] / origin[3,0], 1.0]).T
            # print "Origin"
            # print origin

            self.dstpts2d = cv.CreateMat(4, 2, cv.CV_32F)
            for i in range(4):
                self.dstpts2d[i, 0] = self.x_floor + (origin[0, 0] - dstdir[i][
                    0, 0] * origin[2, 0] / dstdir[i][2, 0]) * self.floor_scale
                self.dstpts2d[i, 1] = self.y_floor - (origin[1, 0] - dstdir[i][
                    1, 0] * origin[2, 0] / dstdir[i][2, 0]) * self.floor_scale
            # print numpy.asarray(self.dstpts2d)

            # print "Source points"
            # print numpy.asarray(self.srcpts2d)
            # print "Dest points"
            # print numpy.asarray(self.dstpts2d)
            self.H = cv.CreateMat(3, 3, cv.CV_32F)
            cv.FindHomography(self.srcpts2d, self.dstpts2d, self.H)
            # print "Homography"
            # print numpy.asarray(self.H)

            cv.WarpPerspective(cv.GetSubRect(
                self.proba, (0, self.horizon_offset, self.proba.width,
                             self.proba.height - self.horizon_offset)),
                               self.floor_map,
                               self.H,
                               flags=cv.CV_INTER_NN + cv.CV_WARP_FILL_OUTLIERS,
                               fillval=0xFF)

            msg = self.br.cv_to_imgmsg(self.floor_map)
            msg.header.stamp = proba.header.stamp
            msg.header.frame_id = self.target_frame
            self.pub.publish(msg)
            # print "Publishing image"

        except (tf.LookupException, tf.ConnectivityException,
                tf.ExtrapolationException):
            print "Exception while looking for transform"
            return
Example #27
    #CONTOUR  MAKING CODE

    # create the image where we want to display results
    image = cv.CreateImage((_SIZE, _SIZE), 8, 1)

    # start with an empty image
    cv.SetZero(image)

    im = cv.LoadImage("C:\\3d-Model\\bin\\segmentation_files\\pic_seg.jpg",
                      cv.CV_LOAD_IMAGE_COLOR)
    image = cv.CreateImage((im.width, im.height), 8, 1)
    cv.CvtColor(im, image, cv.CV_BGR2GRAY)
    threshold = 51
    colour = 255
    cv.Threshold(image, image, threshold, colour, cv.CV_THRESH_BINARY)

    # create the window for the contours
    cv.NamedWindow("contours", cv.CV_WINDOW_NORMAL)

    # create the trackbar, to enable the change of the displayed level
    cv.CreateTrackbar("levels+3", "contours", 3, 7, on_contour)

    # create the storage area for contour image
    storage = cv.CreateMemStorage(0)

    # find the contours
    contours = cv.FindContours(image, storage, cv.CV_RETR_TREE,
                               cv.CV_CHAIN_APPROX_SIMPLE, (0, 0))

    # polygon approximation
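
The trackbar callback on_contour referenced above is not shown. A sketch of what it might do, in the spirit of the classic contour-drawing demos (drawing colours and the level offset are assumptions; cv2.cv is imported as cv):

def on_contour(levels):
    # hypothetical callback: redraw the contour tree down to the selected level
    dst = cv.CreateImage(cv.GetSize(image), 8, 3)
    cv.SetZero(dst)
    if contours:
        cv.DrawContours(dst, contours, cv.CV_RGB(255, 0, 0), cv.CV_RGB(0, 255, 0),
                        levels - 3, 1, cv.CV_AA, (0, 0))
    cv.ShowImage("contours", dst)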
Example #28
    def run(self):
        # Initialize
        # log_file_name = "tracker_output.log"
        # log_file = file( log_file_name, 'a' )

        print "hello"

        frame = cv.QueryFrame(self.capture)
        frame_size = cv.GetSize(frame)

        # Capture the first frame from webcam for image properties
        display_image = cv.QueryFrame(self.capture)

        # Greyscale image, thresholded to create the motion mask:
        grey_image = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)

        # The RunningAvg() function requires a 32-bit or 64-bit image...
        running_average_image = cv.CreateImage(cv.GetSize(frame),
                                               cv.IPL_DEPTH_32F, 3)

        # ...but the AbsDiff() function requires matching image depths:
        running_average_in_display_color_depth = cv.CloneImage(display_image)

        # RAM used by FindContours():
        mem_storage = cv.CreateMemStorage(0)

        # The difference between the running average and the current frame:
        difference = cv.CloneImage(display_image)

        target_count = 1
        last_target_count = 1
        last_target_change_t = 0.0
        k_or_guess = 1
        codebook = []
        frame_count = 0
        last_frame_entity_list = []

        t0 = time.time()

        # For toggling display:
        image_list = ["camera", "difference", "threshold", "display", "faces"]
        image_index = 3  # Index into image_list

        # Prep for text drawing:
        text_font = cv.InitFont(cv.CV_FONT_HERSHEY_COMPLEX, .5, .5, 0.0, 1,
                                cv.CV_AA)
        text_coord = (5, 15)
        text_color = cv.CV_RGB(255, 255, 255)

        # Set this to the max number of targets to look for (passed to k-means):
        max_targets = 5

        while True:

            # Capture frame from webcam
            camera_image = cv.QueryFrame(self.capture)

            frame_count += 1
            frame_t0 = time.time()

            # Create an image with interactive feedback:
            display_image = cv.CloneImage(camera_image)

            # Create a working "color image" to modify / blur
            color_image = cv.CloneImage(display_image)

            # Smooth to get rid of false positives
            cv.Smooth(color_image, color_image, cv.CV_GAUSSIAN, 19, 0)

            # Use the Running Average as the static background
            # a = 0.020 leaves artifacts lingering way too long.
            # a = 0.320 works well at 320x240, 15fps.  (1/a is roughly num frames.)
            cv.RunningAvg(color_image, running_average_image, 0.320, None)

            # Convert the scale of the moving average.
            cv.ConvertScale(running_average_image,
                            running_average_in_display_color_depth, 1.0, 0.0)

            # Subtract the current frame from the moving average.
            cv.AbsDiff(color_image, running_average_in_display_color_depth,
                       difference)

            cv.ShowImage("difference ", difference)

            # Convert the image to greyscale.
            cv.CvtColor(difference, grey_image, cv.CV_RGB2GRAY)

            # Threshold the image to a black and white motion mask:
            cv.Threshold(grey_image, grey_image, 2, 255, cv.CV_THRESH_BINARY)
            # Smooth and threshold again to eliminate "sparkles"
            cv.Smooth(grey_image, grey_image, cv.CV_GAUSSIAN, 19, 0)
            cv.Threshold(grey_image, grey_image, 240, 255, cv.CV_THRESH_BINARY)

            grey_image_as_array = numpy.asarray(cv.GetMat(grey_image))
            non_black_coords_array = numpy.where(grey_image_as_array > 3)
            # Convert from numpy.where()'s two separate lists to one list of (x, y) tuples:
            non_black_coords_array = zip(non_black_coords_array[1],
                                         non_black_coords_array[0])

            #             cv.SegmentMotion(non_black_coords_array, None, storage, timestamp, seg_thresh)

            # print "min_size is: " + str(min_size)
            # Listen for ESC or ENTER key
            c = cv.WaitKey(7) % 0x100
            if c == 27 or c == 10:
                break

            # Toggle which image to show


#             if chr(c) == 'd':
#                 image_index = ( image_index + 1 ) % len( image_list )
#
#             image_name = image_list[ image_index ]
#
#             # Display frame to user
#             if image_name == "camera":
#                 image = camera_image
#                 cv.PutText( image, "Camera (Normal)", text_coord, text_font, text_color )
#             elif image_name == "difference":
#                 image = difference
#                 cv.PutText( image, "Difference Image", text_coord, text_font, text_color )
#             elif image_name == "display":
#                 image = display_image
#                 cv.PutText( image, "Targets (w/AABBs and contours)", text_coord, text_font, text_color )
#             elif image_name == "threshold":
#                 # Convert the image to color.
#                 cv.CvtColor( grey_image, display_image, cv.CV_GRAY2RGB )
#                 image = display_image  # Re-use display image here
#                 cv.PutText( image, "Motion Mask", text_coord, text_font, text_color )
#             elif image_name == "faces":
#                 # Do face detection
#                 detect_faces( camera_image, haar_cascade, mem_storage )
#                 image = camera_image  # Re-use camera image here
#                 cv.PutText( image, "Face Detection", text_coord, text_font, text_color )
#             cv.ShowImage( "Target", image )

            image1 = display_image

            cv.ShowImage("Target 1", image1)

            #             if self.writer:
            #                 cv.WriteFrame( self.writer, image );

            # log_file.flush()

            # If only using a camera, then there is no time.sleep() needed,
            # because the camera clips us to 15 fps.  But if reading from a file,
            # we need this to keep the time-based target clipping correct:
            frame_t1 = time.time()

            # If reading from a file, put in a forced delay:
            if not self.writer:
                delta_t = frame_t1 - frame_t0
                if delta_t < (1.0 / 15.0): time.sleep((1.0 / 15.0) - delta_t)

        t1 = time.time()
        time_delta = t1 - t0
        processed_fps = float(frame_count) / time_delta
        print "Got %d frames. %.1f s. %f fps." % (frame_count, time_delta,
                                                  processed_fps)
Example #29
import cv2.cv as cv

image = cv.LoadImage('../img/build.png', cv.CV_LOAD_IMAGE_GRAYSCALE)

#Get edges
morphed = cv.CloneImage(image)
cv.MorphologyEx(image, morphed, None, None,
                cv.CV_MOP_GRADIENT)  # Morphological gradient (dilation minus erosion) highlights the edges

cv.Threshold(morphed, morphed, 30, 255, cv.CV_THRESH_BINARY_INV)

cv.ShowImage("Image", image)
cv.ShowImage("Morphed", morphed)

cv.WaitKey(0)
Example #30
    def find(self, img):
        started = time.time()
        gray = self.Cached('gray', img.height, img.width, cv.CV_8UC1)
        cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

        sobel = self.Cached('sobel', img.height, img.width, cv.CV_16SC1)
        sobely = self.Cached('sobely', img.height, img.width, cv.CV_16SC1)

        cv.Sobel(gray, sobel, 1, 0)
        cv.Sobel(gray, sobely, 0, 1)
        cv.Add(sobel, sobely, sobel)

        sobel8 = self.Cached('sobel8', sobel.height, sobel.width, cv.CV_8UC1)
        absnorm8(sobel, sobel8)
        cv.Threshold(sobel8, sobel8, 128.0, 255.0, cv.CV_THRESH_BINARY)

        sobel_integral = self.Cached('sobel_integral', img.height + 1,
                                     img.width + 1, cv.CV_32SC1)
        cv.Integral(sobel8, sobel_integral)

        d = 16
        _x1y1 = cv.GetSubRect(
            sobel_integral,
            (0, 0, sobel_integral.cols - d, sobel_integral.rows - d))
        _x1y2 = cv.GetSubRect(
            sobel_integral,
            (0, d, sobel_integral.cols - d, sobel_integral.rows - d))
        _x2y1 = cv.GetSubRect(
            sobel_integral,
            (d, 0, sobel_integral.cols - d, sobel_integral.rows - d))
        _x2y2 = cv.GetSubRect(
            sobel_integral,
            (d, d, sobel_integral.cols - d, sobel_integral.rows - d))

        summation = cv.CloneMat(_x2y2)
        cv.Sub(summation, _x1y2, summation)
        cv.Sub(summation, _x2y1, summation)
        cv.Add(summation, _x1y1, summation)
        sum8 = self.Cached('sum8', summation.height, summation.width,
                           cv.CV_8UC1)
        absnorm8(summation, sum8)
        cv.Threshold(sum8, sum8, 32.0, 255.0, cv.CV_THRESH_BINARY)

        cv.ShowImage("sum8", sum8)
        seq = cv.FindContours(sum8, cv.CreateMemStorage(), cv.CV_RETR_EXTERNAL)
        subimg = cv.GetSubRect(img, (d / 2, d / 2, sum8.cols, sum8.rows))
        t_cull = time.time() - started

        seqs = []
        while seq:
            seqs.append(seq)
            seq = seq.h_next()

        started = time.time()
        found = {}
        print 'seqs', len(seqs)
        for seq in seqs:
            area = cv.ContourArea(seq)
            if area > 1000:
                rect = cv.BoundingRect(seq)
                edge = int((14 / 14.) * math.sqrt(area) / 2 + 0.5)
                candidate = cv.GetSubRect(subimg, rect)
                sym = self.dm.decode(
                    candidate.width,
                    candidate.height,
                    buffer(candidate.tostring()),
                    max_count=1,
                    #min_edge = 6,
                    #max_edge = int(edge)      # Units of 2 pixels
                )
                if sym:
                    onscreen = [(d / 2 + rect[0] + x, d / 2 + rect[1] + y)
                                for (x, y) in self.dm.stats(1)[1]]
                    found[sym] = onscreen
                else:
                    print "FAILED"
        t_brute = time.time() - started
        print "cull took", t_cull, "brute", t_brute
        return found
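
absnorm8() (and the Cached() buffer helper) are defined outside this snippet. A plausible sketch of absnorm8, assuming it rescales the absolute values of the 16S Sobel/integral results into the full 8-bit range (cv2.cv imported as cv):

def absnorm8(im, im8):
    # hypothetical helper: 8-bit image of absolute values, normalized so the peak maps to 255
    (minVal, maxVal, _, _) = cv.MinMaxLoc(im)
    peak = max(abs(minVal), abs(maxVal))
    cv.ConvertScaleAbs(im, im8, 255.0 / peak if peak else 1.0, 0)
    return im8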