Code Example #1
    def find_blobs(self, frame, debug_image):
        '''Find blobs in an image.

        The returned blobs should correspond to buoys; any sanity checking of
        the candidates is done outside of this function.

        '''

        # Get Channels
        hsv = cv.CreateImage(cv.GetSize(frame), 8, 3)
        cv.CvtColor(frame, hsv, cv.CV_BGR2HSV)
        saturation = libvision.misc.get_channel(hsv, 1)
        red = libvision.misc.get_channel(frame, 2)

        # Adaptive Threshold
        cv.AdaptiveThreshold(
            saturation,
            saturation,
            255,
            cv.CV_ADAPTIVE_THRESH_MEAN_C,
            cv.CV_THRESH_BINARY_INV,
            self.saturation_adaptive_thresh_blocksize -
            self.saturation_adaptive_thresh_blocksize % 2 + 1,
            self.saturation_adaptive_thresh,
        )
        cv.AdaptiveThreshold(
            red,
            red,
            255,
            cv.CV_ADAPTIVE_THRESH_MEAN_C,
            cv.CV_THRESH_BINARY,
            self.red_adaptive_thresh_blocksize -
            self.red_adaptive_thresh_blocksize % 2 + 1,
            -1 * self.red_adaptive_thresh,
        )

        kernel = cv.CreateStructuringElementEx(9, 9, 4, 4, cv.CV_SHAPE_ELLIPSE)
        cv.Erode(saturation, saturation, kernel, 1)
        cv.Dilate(saturation, saturation, kernel, 1)
        cv.Erode(red, red, kernel, 1)
        cv.Dilate(red, red, kernel, 1)

        buoys_filter = cv.CreateImage(cv.GetSize(frame), 8, 1)
        cv.And(saturation, red, buoys_filter)

        if debug_image:
            svr.debug("Saturation", saturation)
            svr.debug("Red", red)
            svr.debug("AdaptiveThreshold", buoys_filter)

        # Get blobs
        labeled_image = cv.CreateImage(cv.GetSize(buoys_filter), 8, 1)
        blobs = libvision.blob.find_blobs(buoys_filter, labeled_image,
                                          MIN_BLOB_SIZE, 10)

        return blobs, labeled_image
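
A note on the block-size arithmetic above: cv.AdaptiveThreshold expects an odd block size, and `blocksize - blocksize % 2 + 1` maps any configured value to the nearest odd value not smaller than it. A minimal standalone sketch of that normalization (not part of the original class):

def odd_blocksize(blocksize):
    # Map an arbitrary setting to the nearest odd value >= itself,
    # e.g. 8 -> 9 and 9 -> 9, as cv.AdaptiveThreshold expects.
    return blocksize - blocksize % 2 + 1

assert odd_blocksize(8) == 9
assert odd_blocksize(9) == 9
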
Code Example #2
    def process(self):

        def seq_to_iter(seq):
            while seq:
                yield seq
                seq = seq.h_next()

        def score_rect(r,p):
            x,y,w,h = r
            return w * h

        cv.Resize(self.frame, self.resized_frame)
        cv.CvtColor(self.resized_frame, self.hsv_frame, cv.CV_RGB2HSV)
        cv.Smooth(self.hsv_frame, self.smooth_frame, cv.CV_GAUSSIAN,
                31)

        for p in self.pompon:
            if p.calibration_done:

                self.in_range(p, self.smooth_frame, self.bin_frame)
                cv.Erode(self.bin_frame, self.mask, None, self.dilatation)

                if self.show_binary:
                    self.mask2 = cv.CloneImage(self.mask) # for miniature

                contour = seq_to_iter(cv.FindContours(self.mask,
                        cv.CreateMemStorage(), cv.CV_RETR_EXTERNAL,
                        cv.CV_CHAIN_APPROX_SIMPLE))

                rects = map((lambda c: cv.BoundingRect(c, 0)), contour)
                if rects:
                    x,y,w,h = max(rects, key=lambda r: score_rect(r,p))

                    p.pos = self.proc2sym(x+w/2,y+h/2)
Code Example #3
def findPoints(frame, oldRectPoints):
    imageSize = cv.GetSize(frame)
    original = cv.CloneImage(frame)
    hsv = cv.CreateImage(imageSize, 8, 3)
    threshold = cv.CreateImage(imageSize, 8, 1)

    # Convert to HSV and threshold to isolate the red parts

    cv.CvtColor(original, hsv, cv.CV_RGB2HSV)

    cv.InRangeS(hsv, (110, 80, 80), (140, 255, 255), threshold)
    cv.Erode(threshold, threshold, iterations=5)
    cv.Dilate(threshold, threshold, iterations=5)
    cv.ShowImage("shit", threshold)

    memory = cv.CreateMemStorage(0)
    clone = cv.CloneImage(threshold)
    contours = cv.FindContours(clone, memory, cv.CV_RETR_LIST,
                               cv.CV_CHAIN_APPROX_SIMPLE, (0, 0))

    #   area = cv.ContourArea(contours)
    if not contours:
        # If there's no red on the screen
        rectPoints = oldRectPoints
    else:
        rectPoints = cv.BoundingRect(contours)
    # print rectPoints

    return rectPoints
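
A hypothetical driver loop for findPoints (not part of the original script); the import line shows the legacy binding assumed throughout these examples, and the rectangle from the previous frame is reused whenever no red region is found:

import cv2.cv as cv  # legacy OpenCV binding assumed by these examples

capture = cv.CaptureFromCAM(0)   # default camera
rect = (0, 0, 0, 0)              # (x, y, w, h); reused when no red is visible
while cv.WaitKey(10) != 27:      # run until ESC
    frame = cv.QueryFrame(capture)
    if frame is None:
        break
    rect = findPoints(frame, rect)
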
Code Example #4
File: experimenting-3.py  Project: vcs96/drummaster
    def findRectPoints(self, oldRectPoints):
        hueRange = self.hueRange
        clone = cv.CloneImage(self.frame)
        hsv = cv.CloneImage(self.channels3)
        threshold = cv.CloneImage(self.channels1)
        threshold2 = cv.CloneImage(self.channels1)

        cv.CvtColor(clone, hsv, cv.CV_RGB2HSV)

        cv.InRangeS(hsv, (165, 100, 100), (180, 255, 255), threshold)
        cv.InRangeS(hsv, (0, 100, 100), (15, 255, 255), threshold2)
        cv.Add(threshold, threshold2, threshold)
        self.hue += 1
        print self.hue
        cv.Erode(threshold, threshold, iterations=5)
        cv.Dilate(threshold, threshold, iterations=5)

        cv.ShowImage(self.color, threshold)

        memory = cv.CreateMemStorage(0)
        clone2 = cv.CloneImage(threshold)
        contours = cv.FindContours(clone2, memory, cv.CV_RETR_LIST,
                                   cv.CV_CHAIN_APPROX_SIMPLE, (0, 0))
        if not contours:
            rectPoints = oldRectPoints
        else:
            rectPoints = cv.BoundingRect(contours)
        return rectPoints
Code Example #5
    def findRectPoints(self, oldRectPoints):
        hueRange = self.hueRange
        satRange = self.satRange
        valRange = self.valRange
        clone = cv.CloneImage(self.frame)
        hsv = cv.CloneImage(self.channels3)
        threshold = cv.CloneImage(self.channels1)
        threshold2 = cv.CloneImage(self.channels1)

        cv.Smooth(clone, clone, cv.CV_GAUSSIAN, 7, 7)

        cv.CvtColor(clone, hsv, cv.CV_BGR2HSV)
        cv.InRangeS(hsv, (hueRange[0], satRange[0], valRange[0]),
                    (hueRange[1], satRange[1], valRange[1]), threshold)
        cv.InRangeS(hsv, (hueRange[2], satRange[0], valRange[0]),
                    (hueRange[3], satRange[1], valRange[1]), threshold2)
        cv.Add(threshold, threshold2, threshold)
        cv.Erode(threshold, threshold, iterations=5)
        cv.Dilate(threshold, threshold, iterations=5)

        #       cv.ShowImage(self.color, threshold)

        memory = cv.CreateMemStorage(0)
        clone2 = cv.CloneImage(threshold)
        contours = cv.FindContours(clone2, memory, cv.CV_RETR_LIST,
                                   cv.CV_CHAIN_APPROX_SIMPLE, (0, 0))
        if not contours:
            rectPoints = oldRectPoints
        else:
            rectPoints = cv.BoundingRect(list(contours))
        return rectPoints
Code Example #6
def threshold_red(image):
    #bright cv.AdaptiveThreshold(image,red_adaptive,255,cv.CV_ADAPTIVE_THRESH_MEAN_C,cv.CV_THRESH_BINARY,17,-30)
    cv.AdaptiveThreshold(image, red_adaptive, 255,
                         cv.CV_ADAPTIVE_THRESH_MEAN_C, cv.CV_THRESH_BINARY, 17,
                         -14)
    cv.Erode(red_adaptive, red_eroded_image, None, 1)
    cv.Dilate(red_eroded_image, red_dilated_image, None, 5)
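
threshold_red writes into module-level images that are created elsewhere in the file; a hypothetical setup for those globals (the names come from the function above, the frame size is illustrative) might look like:

import cv2.cv as cv  # legacy OpenCV binding assumed by these examples

size = (640, 480)  # illustrative; must match the single-channel input image
red_adaptive = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)
red_eroded_image = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)
red_dilated_image = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)
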
Code Example #7
def find_lines(frame):
    # Resize to 640x480
    frame_small = cv.CreateMat(480, 640, cv.CV_8UC3)
    cv.Resize(frame, frame_small)

    # Threshold by distance: blank out all top pixels
    cv.Rectangle(frame_small, (0, 0), (640, 80), (0, 0, 0, 0), cv.CV_FILLED)

    frame_size = cv.GetSize(frame_small)
    frame_gray = cv.CreateImage(frame_size, cv.IPL_DEPTH_8U, 1)
    edges = cv.CreateImage(frame_size, cv.IPL_DEPTH_8U, 1)
    cv.CvtColor(frame_small, frame_gray, cv.CV_BGR2GRAY)

    cv.Canny(frame_gray, edges, 400, 400)
    cv.Dilate(edges, edges,
              cv.CreateStructuringElementEx(3, 3, 0, 0, cv.CV_SHAPE_RECT))
    cv.Erode(edges, edges,
             cv.CreateStructuringElementEx(1, 1, 0, 0, cv.CV_SHAPE_RECT))

    line_storage = cv.CreateMemStorage()
    lines = cv.HoughLines2(edges, line_storage, cv.CV_HOUGH_PROBABILISTIC, 1,
                           cv.CV_PI / 180.0, 300, 100, 40)
    print len(lines), 'lines found'
    for i in range(len(lines)):
        line = lines[i]
        cv.Line(frame_small, line[0], line[1],
                hv2rgb(360.0 * i / len(lines), 1.0), 3, 8)

    cv.ShowImage('frame', frame_small)
    cv.ShowImage('edges', edges)
Code Example #8
        def get_dirmarker(img, angle, Dist, radius):
            X, Y = entCenter(robot)
            Len, _ = entSize(robot)
            point = (X + (Dist + Len / 2.0) * cos(angle),
                     Y - (Dist + Len / 2.0) * sin(angle))
            point = intPoint(point)

            #For visualisation:
            # cv.Circle( frame, point, radius, (0,200,200), 1 )

            point2 = point[0] - nhood[0], point[1] - nhood[1]
            out = cv.CloneImage(img)
            cv.Zero(out)
            cv.Circle(out, point2, radius, (255, 255, 255), -1)

            cv.And(out, img2, out)
            center1 = self.centralMoment(out)
            count1 = cv.CountNonZero(out)

            cv.Erode(out, out)
            center2 = self.centralMoment(out)
            count2 = cv.CountNonZero(out)

            if count2 == 0 and count1 > 10:
                return center1
            else:
                return center2
Code Example #9
def threshold_green(image):
    #cv.InRange(blurred_image,GREEN_MIN,GREEN_MAX,green_adaptive)
    cv.AdaptiveThreshold(image, green_adaptive, 255,
                         cv.CV_ADAPTIVE_THRESH_MEAN_C, cv.CV_THRESH_BINARY_INV,
                         101, 40)  #25
    cv.Erode(green_adaptive, green_eroded_image, None, 5)  #9
    cv.Dilate(green_eroded_image, green_dilated_image, None, 6)  #27
Code Example #10
def get_mask_with_contour(img,
                          ret_img=False,
                          ret_cont=False,
                          with_init_mask=False,
                          cont_color=cv.RGB(255, 50, 50),
                          normalize=True,
                          skin_version=1,
                          strong=False):
    if normalize:
        img = normalize_rgb(img, aggressive=0.005)
    mask = skin_mask(img) if skin_version == 1 else skin_mask2(img)

    di_mask = image_empty_clone(mask)
    cv.Dilate(mask, di_mask)

    seqs = cv.FindContours(cv.CloneImage(di_mask), memory(),
                           cv.CV_RETR_EXTERNAL)

    c_img = image_empty_clone(mask)
    cv.DrawContours(c_img, seqs, 255, 255, 10, -1)

    er_img = image_empty_clone(c_img)
    cv.Erode(c_img, er_img, iterations=2)

    seqs = cv.FindContours(cv.CloneImage(er_img), memory(),
                           cv.CV_RETR_EXTERNAL)
    if not seqs:
        print "no areas"
        return img, None
    seqs = cv.ApproxPoly(seqs,
                         memory(),
                         cv.CV_POLY_APPROX_DP,
                         parameter=3,
                         parameter2=1)

    result = []
    if ret_img:
        #        er_seq_img = cv.CreateImage(sizeOf(er_img), 8, 3)
        #        cv.Zero(er_seq_img)
        er_seq_img = cv.CloneImage(img)
        if with_init_mask:
            cv.Merge(mask, mask, mask, None, er_seq_img)

        if strong:
            cv.DrawContours(er_seq_img, seqs, cont_color, 0, 10, thickness=3)
            cv.DrawContours(er_seq_img,
                            seqs,
                            cv.RGB(0, 0, 0),
                            0,
                            10,
                            thickness=1)
        else:
            cv.DrawContours(er_seq_img, seqs, cont_color, 0, 10, thickness=1)
        result.append(er_seq_img)

    if ret_cont:
        result.append(seqs)

    return result
Code Example #11
def threshhold(img):
    bwdst = cv.CreateImage(cv.GetSize(img), cv.IPL_DEPTH_8U, 1)
    cv.CvtColor(img, bwdst, cv.CV_BGR2GRAY)
    cv.AdaptiveThreshold(bwdst, bwdst, 255.0, cv.CV_ADAPTIVE_THRESH_MEAN_C,
                         cv.CV_THRESH_BINARY, 11)
    cv.Dilate(bwdst, bwdst)
    cv.Erode(bwdst, bwdst)
    return bwdst
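
A hypothetical one-off use of threshhold (file names are placeholders, not from the original project):

import cv2.cv as cv  # legacy OpenCV binding assumed by these examples

img = cv.LoadImage("input.png")   # placeholder path; 3-channel BGR image
bw = threshhold(img)              # adaptive binary version
cv.SaveImage("input_bw.png", bw)
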
Code Example #12
    def create_image(self):

        #Create images for each channel (self.size holds the frame size)
        blue = cv.CreateImage(self.size, 8, 1)
        red = cv.CreateImage(self.size, 8, 1)
        green = cv.CreateImage(self.size, 8, 1)

        hue = cv.CreateImage(self.size, 8, 1)
        sat = cv.CreateImage(self.size, 8, 1)
        val = cv.CreateImage(self.size, 8, 1)

        #Create an image to be returned and eventually displayed
        thresholds = cv.CreateImage(self.size, 8, 1)

        #Create images to save the thresholded images to
        red_threshed = cv.CreateImage(self.size, 8, 1)
        green_threshed = cv.CreateImage(self.size, 8, 1)
        blue_threshed = cv.CreateImage(self.size, 8, 1)

        hue_threshed = cv.CreateImage(self.size, 8, 1)
        sat_threshed = cv.CreateImage(self.size, 8, 1)
        val_threshed = cv.CreateImage(self.size, 8, 1)

        #Split the image up into channels, saving them in their respective image
        cv.Split(self.image, blue, green, red, None)
        cv.CvtColor(self.image, self.hsv, cv.CV_RGB2HSV)
        cv.Split(self.hsv, hue, sat, val, None)

        #Threshold the images based on the slider values
        cv.InRangeS(red, self.thresholds['low_red'],\
                    self.thresholds['high_red'], red_threshed)
        cv.InRangeS(green, self.thresholds['low_green'],\
                    self.thresholds['high_green'], green_threshed)
        cv.InRangeS(blue, self.thresholds['low_blue'],\
                    self.thresholds['high_blue'], blue_threshed)

        cv.InRangeS(hue, self.thresholds['low_hue'],\
                    self.thresholds['high_hue'], hue_threshed)
        cv.InRangeS(sat, self.thresholds['low_sat'],\
                    self.thresholds['high_sat'], sat_threshed)
        cv.InRangeS(val, self.thresholds['low_val'],\
                    self.thresholds['high_val'], val_threshed)

        #Recombine all of the thresholded images into one image
        cv.Mul(red_threshed, green_threshed, thresholds)
        cv.Mul(thresholds, blue_threshed, thresholds)
        cv.Mul(thresholds, hue_threshed, thresholds)
        cv.Mul(thresholds, sat_threshed, thresholds)
        cv.Mul(thresholds, val_threshed, thresholds)

        #Erode and Dilate shave off and add edge pixels respectively
        cv.Erode(thresholds, thresholds, iterations=1)
        cv.Dilate(thresholds, thresholds, iterations=1)

        return thresholds
Code Example #13
def track(bgr_image, threshold=100):
    '''Accepts a BGR image and an optional object threshold between 0 and 255 (default = 100).
       Returns: (x,y) coordinates of the centroid if one was found
                None if no centroid was found or the user hit ESC
    '''
    
    # Extract bytes, width, and height
    bgr_bytes = bgr_image.tostring()
    width = bgr_image.width
    height = bgr_image.height
    
    # Create separate red, green, and blue image matrices from bytes
    r_image = _create_grayscale_mat(bgr_bytes, width, height, 2)
    b_image = _create_grayscale_mat(bgr_bytes, width, height, 0)
    g_image = _create_grayscale_mat(bgr_bytes, width, height, 1)

    # Remove 1/3 of red and blue components from green
    threes_image = cv.CreateImage((width,height), cv.IPL_DEPTH_8U, 1)  
    cv.Set(threes_image, 3)
    _div_and_sub(g_image, r_image, threes_image)
    _div_and_sub(g_image, b_image, threes_image)

    # Threshold and erode green image
    cv.Threshold(g_image, g_image, threshold, 255, cv.CV_THRESH_BINARY)
    cv.Erode(g_image, g_image)

    # Find centroid of eroded image
    moments = cv.Moments(cv.GetMat(g_image), 1) # binary flag
    centroid_x = _centroid(moments, 1, 0)
    centroid_y = _centroid(moments, 0, 1)

    # Assume no centroid
    ctr = (width/2,height/2)
    err = ctr

    # Use centroid if it exists
    if centroid_x != None and centroid_y != None:

        ctr = (centroid_x, centroid_y)

        # Put black circle in at centroid in image
        cv.Circle(bgr_image, ctr, 4, (0,0,0))

    # Display full-color image
    cv.NamedWindow(WINDOW_NAME)
    cv.ShowImage(WINDOW_NAME, bgr_image)

    # Force image display, setting centroid to None on ESC key input
    if cv.WaitKey(5) == 27:
        ctr = None
    
    # Return coordinates of centroid
    return ctr if ctr != err else None
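
A hypothetical capture loop around track() (not part of the original module; WINDOW_NAME and the underscore-prefixed helpers are assumed to be defined alongside it):

import cv2.cv as cv  # legacy OpenCV binding assumed by these examples

capture = cv.CaptureFromCAM(0)
while True:
    frame = cv.QueryFrame(capture)
    if frame is None:
        break
    centroid = track(frame, threshold=120)  # (x, y), or None on ESC / no target
    if centroid is None:
        break
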
Code Example #14
def channel_processing(channel):
    cv.AdaptiveThreshold(channel,
                         channel,
                         255,
                         adaptive_method=cv.CV_ADAPTIVE_THRESH_MEAN_C,
                         thresholdType=cv.CV_THRESH_BINARY,
                         blockSize=55,
                         param1=7)
    #mop up the dirt
    cv.Dilate(channel, channel, None, 1)
    cv.Erode(channel, channel, None, 1)
Code Example #15
def rotAngle(image):
    dark = cv.CreateImage(cv.GetSize(image), image.depth, image.nChannels)
    cv.Erode(image, dark, None, 2)
    cv.Threshold(dark, dark, 224, 255, cv.CV_THRESH_BINARY_INV)
    appr = approximator(cv.GetSize(dark))
    alpha = appr.approximate(dark, 180.0, 10.0)
    appr.setAlpha(alpha)
    appr.setCollisionPoint(dark)
    forLog = cv.CreateImage(cv.GetSize(dark), cv.IPL_DEPTH_8U, 3)
    cv.CvtColor(dark, forLog, cv.CV_GRAY2BGR)
    appr.draw(forLog)
    return alpha + 180.0, forLog
Code Example #16
def threshold(image,
              hsvImg,
              threshImg,
              lowerHSV,
              upperHSV,
              erode_and_dilate=True):
    """ 
    Thresholds an image for a certain range of hsv values 
    """
    cv.Smooth(image, image, cv.CV_GAUSSIAN, 3, 0)
    cv.CvtColor(image, hsvImg, cv.CV_BGR2HSV)
    cv.InRangeS(hsvImg, lowerHSV, upperHSV, threshImg)
    if erode_and_dilate:
        cv.Erode(threshImg, threshImg, None, 2)
        cv.Dilate(threshImg, threshImg, None, 2)
        #cv.Erode(threshImg, threshImg, None, 2)
        #cv.Dilate(threshImg, threshImg, None, 1)
        #cv.Erode(threshImg, threshImg, None, 1)
        cv.Dilate(threshImg, threshImg, None, 1)
        cv.Erode(threshImg, threshImg, None, 1)
    return threshImg
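
A hypothetical call site for threshold() above (the HSV range and file name are illustrative); the HSV and threshold work images are allocated once and reused across calls:

import cv2.cv as cv  # legacy OpenCV binding assumed by these examples

frame = cv.LoadImage("frame.png")                 # placeholder input image
hsvImg = cv.CreateImage(cv.GetSize(frame), 8, 3)
threshImg = cv.CreateImage(cv.GetSize(frame), 8, 1)
mask = threshold(frame, hsvImg, threshImg,
                 (100, 80, 80), (130, 255, 255))  # roughly a blue range in OpenCV HSV
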
Code Example #17
    def fuzz_image(self, image):

        cv.Smooth(image,
                  self.fuzz,
                  smoothtype=cv.CV_GAUSSIAN,
                  param1=23,
                  param2=23)
        # cv.Threshold(self.fuzz, self.fuzz, self.threshold_value, 255, cv.CV_THRESH_BINARY)
        cv.Erode(self.fuzz, self.fuzz, None, self.erode_value)
        cv.Dilate(self.fuzz, self.fuzz, None, self.dialate_value)

        return self.fuzz
Code Example #18
File: greenball_tracker.py  Project: hbradlow/Drones
def track(img_bytes, img_width, img_height):
    '''Accepts BGR image bytes, image width, and image height. 
           Returns: (x,y) coordinates of centroid if found
                    None if no centroid was found
                    (0,0) if user hit ESC
    '''

    # Create full-color image from bytes
    full_image = _create_image_header(img_width, img_height, 3)  
    cv.SetData(full_image, img_bytes, img_width*3)
  
    # Create separate red, green, and blue images from bytes
    r_image = _create_grayscale_image(img_bytes, img_width, img_height, 2)
    b_image = _create_grayscale_image(img_bytes, img_width, img_height, 0)
    g_image = _create_grayscale_image(img_bytes, img_width, img_height, 1)

    # Remove 1/3 of red and blue components from green
    threes_image = cv.CreateImage((img_width,img_height), cv.IPL_DEPTH_8U, 1)  
    cv.Set(threes_image, 3)
    _div_and_sub(g_image, r_image, threes_image)
    _div_and_sub(g_image, b_image, threes_image)

    # Threshold and erode green image
    cv.Threshold(g_image, g_image, THRESHOLD, 255, cv.CV_THRESH_BINARY)
    cv.Erode(g_image, g_image)

    # Find centroid of eroded image
    moments = cv.Moments(cv.GetMat(g_image), 1) # binary flag
    centroid_x = _centroid(moments, 1, 0)
    centroid_y = _centroid(moments, 0, 1)

    # Assume no centroid
    ctr = None

    # Use centroid if it exists
    if centroid_x != None and centroid_y != None:

        ctr = (centroid_x, centroid_y)

        # Put black circle in at centroid in image
        cv.Circle(full_image, ctr, 4, (0,0,0))

    # Display full-color image
    cv.NamedWindow(WINDOW_NAME)
    cv.ShowImage(WINDOW_NAME, full_image)

    # Force image display, setting centroid to (0,0) on ESC key input
    if cv.WaitKey(5) == 27:
        ctr = (0,0)
    
    # Return coordinates of centroid
    return ctr
Code Example #19
File: main.py  Project: micmax93/RpiTurret
    def getDiff(self, frame):
        diffImg = cv.CloneImage(self.referencedImage)
        cv.Smooth(frame, frame, cv.CV_GAUSSIAN, 9, 0)
        cv.AbsDiff(self.referencedImage, frame, diffImg)

        greyImg = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)

        cv.CvtColor(diffImg, greyImg, cv.CV_RGB2GRAY)
        cv.Threshold(greyImg, greyImg, 30, 255, cv.CV_THRESH_BINARY)
        cv.Dilate(greyImg, greyImg, None, 9)
        cv.Erode(greyImg, greyImg, None, 5)

        return greyImg
Code Example #20
def printBlobs(img, min_color, max_color):
    blobs = cvblob.Blobs()  # container for the detected blobs
    size = cv.GetSize(img)  # size of the input image

    # Working images: an HSV copy of the input, a single-channel image for the
    # colour threshold, and a label image used later to show the blobs
    hsv = cv.CreateImage(size, cv.IPL_DEPTH_8U, 3)
    thresh = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)
    labelImg = cv.CreateImage(size, cvblob.IPL_DEPTH_LABEL, 1)

    # Convert the colour image to HSV and keep only the pixels that fall
    # between min_color and max_color
    cv.CvtColor(img, hsv, cv.CV_BGR2HSV)
    cv.InRangeS(hsv, min_color, max_color, thresh)

    # Corrections to the thresholded image to remove as many false positives
    # as possible
    cv.Smooth(thresh, thresh, cv.CV_BLUR)  # smooth the thresholded image
    cv.Dilate(thresh, thresh)              # dilate it
    cv.Erode(thresh, thresh)               # erode it

    # Label the connected components; the return value is used below as the
    # total number of labelled pixels to compute the average blob size
    result = cvblob.Label(thresh, labelImg, blobs)

    # blobs is a dictionary keyed by blob number, whose values point to the
    # blob data
    numblobs = len(blobs.keys())

    avgSize = int(result / numblobs)  # average blob size in pixels

    print "average size: " + str(avgSize)
    arr = []  # blobs to remove
    for x in blobs:
        if blobs[x].area < avgSize:
            # blobs smaller than the average size are marked for removal
            arr.append(x)
    for x in arr:
        del blobs[x]  # remove the blob from the dictionary of blobs
    for x in blobs:
        # print the blob number and the area of the blob
        print str(blobs[x]) + "," + str(blobs[x].area)
Code Example #21
def detecta(imagem):

    cv.Smooth(imagem, imagem, cv.CV_GAUSSIAN, 3)
    maiorArea = 0
    listaContornos = []
    listaVertices = []

    cv.AbsDiff(imagem, fundo, mascara)
    cv.CvtColor(mascara, cinza, cv.CV_BGR2GRAY)
    cv.Threshold(cinza, cinza, 50, 255, cv.CV_THRESH_BINARY)

    cv.Dilate(cinza, cinza, None, 18)
    cv.Erode(cinza, cinza, None, 18)

    armazenamento = cv.CreateMemStorage(0)
    contorno = cv.FindContours(cinza, armazenamento, cv.CV_RETR_LIST,
                               cv.CV_LINK_RUNS)

    while contorno:
        vertices_do_retangulo = cv.BoundingRect(list(contorno))
        listaVertices.append(vertices_do_retangulo)

        listaContornos.append(cv.ContourArea(contorno))
        maiorArea = max(listaContornos)
        maiorArea_index = listaContornos.index(maiorArea)
        retangulo_de_interesse = listaVertices[maiorArea_index]

        contorno = contorno.h_next()

        ponto1 = (retangulo_de_interesse[0], retangulo_de_interesse[1])
        ponto2 = (retangulo_de_interesse[0] + retangulo_de_interesse[2],
                  retangulo_de_interesse[1] + retangulo_de_interesse[3])
        cv.Rectangle(imagem, ponto1, ponto2, cv.CV_RGB(0, 0, 0), 2)
        cv.Rectangle(cinza, ponto1, ponto2, cv.CV_RGB(255, 255, 255), 1)
        largura = ponto2[0] - ponto1[0]
        altura = ponto2[1] - ponto1[1]
        cv.Line(cinza, (ponto1[0] + largura / 2, ponto1[1]),
                (ponto1[0] + largura / 2, ponto2[1]), cv.CV_RGB(255, 255,
                                                                255), 1)
        cv.Line(cinza, (ponto1[0], ponto1[1] + altura / 2),
                (ponto2[0], ponto1[1] + altura / 2), cv.CV_RGB(255, 255,
                                                               255), 1)
        global x
        x = ((640 / 2 - (ponto1[0] + (largura / 2))) * -1) / 5

    cv.ShowImage("Webcam", imagem)
    cv.ShowImage("Mascara", mascara)
    cv.ShowImage("Cinza", cinza)
Code Example #22
    def doStuff(self):
        capture = cv.CaptureFromCAM(self.MY_CAMERA)
        if not capture:
            print "I am blinded, check Camera Config"
            exit(1)

        cv.NamedWindow('camera', cv.CV_WINDOW_AUTOSIZE)
        cv.NamedWindow('threshed', cv.CV_WINDOW_AUTOSIZE)
        cv.NamedWindow('cropped', cv.CV_WINDOW_AUTOSIZE)

        while 1:    
            image = cv.QueryFrame(capture)
#            image = cv.LoadImage("2012_automata.jpg")
            if not image:
                break
        
#/////////////////////////////////////////////////////
#            Blur the image, threshold it, and clean it up with dilate/erode
            image_smoothed = cv.CloneImage(image)
            cv.Smooth(image, image_smoothed, cv.CV_GAUSSIAN, 1)
            image_threshed = self.thresholded_image(image_smoothed)
            cv.Dilate(image_threshed, image_threshed, None, 3)
            cv.Erode(image_threshed, image_threshed, None, 3)
#///////////////////////////////////////////////////////
#            Get the Contours
            current_contour = cv.FindContours(cv.CloneImage(image_threshed), cv.CreateMemStorage(), cv.CV_RETR_CCOMP, cv.CV_CHAIN_APPROX_SIMPLE)
            object_position=(0,0)
            if len(current_contour) != 0:
                object_position =  self.contourCenter(self.largestContour(current_contour))

#            cropped = cv.CreateImage((image_threshed.width,image_threshed.height), image_threshed.depth, image_threshed.nChannels)
#            print object_position
            try:
                src_region = cv.GetSubRect(image_threshed, (0,object_position[1]-(2/100),image_threshed.width,image_threshed.height*3/100))
            except:
                src_region = cv.GetSubRect(image_threshed, (0,0,image_threshed.width,image_threshed.height*5/100))
            image = self.drawPointOnImage(image, object_position)
            image = self.getSlicedCenter(src_region, image)
            cv.ShowImage('threshed', image_threshed)
            cv.ShowImage('camera', image)
            cv.ShowImage('cropped', src_region)
            c = cv.WaitKey(10)
            if c != -1:
#                return src_region
                break
Code Example #23
def CenterFunction(R, imghsv):
    imgyellowthresh = getthresholdedimgRGeneric(R, imghsv)  # create the colour masks, here specific to robot R4

    cv.Erode(imgyellowthresh, imgyellowthresh, None, 3)  # filter
    cv.Dilate(imgyellowthresh, imgyellowthresh, None, 6)  # filter
    img2 = cv.CloneImage(imgyellowthresh)  # clone the image into img2, unused
    storage = cv.CreateMemStorage(0)  # memory storage required by FindContours
    contour = cv.FindContours(imgyellowthresh, storage, cv.CV_RETR_CCOMP, cv.CV_CHAIN_APPROX_SIMPLE)  # find the robots' contours
    points = []

    # This is the new part here, i.e. the use of cv.BoundingRect()
    while contour:
        # Draw bounding rectangles
        bound_rect = cv.BoundingRect(list(contour))  # rectangle around the contour points, for display/debugging
        #bound_rect = cv.BoundingRect(contour)

        # for more details about cv.BoundingRect, see the documentation
        pt1 = (bound_rect[0], bound_rect[1])
        pt2 = (bound_rect[0] + bound_rect[2], bound_rect[1] + bound_rect[3])
        points.append(pt1)
        points.append(pt2)
        cv.Rectangle(color_image, pt1, pt2, cv.CV_RGB(255, 0, 0), 1)
        # up to this point the rectangle has been drawn

        # Calculate the centroid
        centroidx = cv.Round((pt1[0] + pt2[0]) / 2)
        centroidy = cv.Round((pt1[1] + pt2[1]) / 2)
        area = cv.ContourArea(list(contour))
        #print "CentroidXY:" + str(centroidx) + ":" + str(centroidy) + "A:" + str(area)
        if area > 100:
            print "CentroidXY:" + str(centroidx) + ":" + str(centroidy) + "A:" + str(area)
            coords = pack('iiiii', 4, centroidx, centroidy, 0, int(time.time()))
            mosq.publish("coords", coords, 0)

        contour = contour.h_next()
        print contour
        # Identify whether the blob is red, blue or green and append its centroid to the corresponding list
        if 169 < cv.Get2D(imghsv, centroidy, centroidx)[0] < 180:
            red.append((centroidx, centroidy))
        elif 100 < cv.Get2D(imghsv, centroidy, centroidx)[0] < 120:
            blue.append((centroidx, centroidy))
        elif 67 < cv.Get2D(imghsv, centroidy, centroidx)[0] < 100:
            green.append((centroidx, centroidy))

    return
Code Example #24
def preprocess(image, addr, extras):
    log = cap.logger(extras, image)
    alpha, dark = rotAngle(image)
    log.log(dark, False)
    clear = clearNoise(image)
    log.log(clear, False)
    straight = cap.doRotate(clear,
                            -alpha,
                            fillval=0,
                            resize=False,
                            interpolation=cv.CV_INTER_NN)
    #cv.Threshold(straight, straight, 128, 255, cv.CV_THRESH_BINARY)
    log.log(straight)
    cv.Dilate(straight, straight)
    cv.Erode(straight, straight)
    log.log(straight)
    cap.processExtras(log.steps, addr, extras, cap.CAP_STAGE_PREPROCESS)
    return straight
Code Example #25
File: motion.py  Project: ahmetech/breakout
 def detectMotion(self, curr):
     assert (curr.nChannels == 1)
     if len(self.history_frames) < self.nHistory:
         self.history_frames.append(curr)
         return curr
     else:
         oldest_frame = self.history_frames.pop(0)
         self.history_frames.append(curr)
     size = (curr.width, curr.height)
     motion_frame = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)
     cv.AbsDiff(oldest_frame, curr, motion_frame)
     cv.CmpS(motion_frame, self.threshold, motion_frame, cv.CV_CMP_GT)
     # Eliminate disperse pixels, which occur because of
     # the noise of the camera
     img_temp = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)
     cv.Erode(motion_frame, img_temp)
     cv.Dilate(img_temp, motion_frame)
     return motion_frame
Code Example #26
    def __init__(self,img):

        small_img = cv.CreateImage((cv.Round(img.width / image_scale),cv.Round(img.height / image_scale)), 8, 3)
        cv.Resize(img, small_img, cv.CV_INTER_LINEAR)

        if H!=0 and S !=0:
            getSkinColor(small_img, hasColor)

        imgHSV = cv.CreateImage(cv.GetSize(small_img), 8, 3)
        cv.CvtColor(small_img, imgHSV, cv.CV_BGR2HSV)

        hueImg = cv.CreateMat(small_img.height, small_img.width, cv.CV_8UC1)
        satImg = cv.CreateMat(small_img.height, small_img.width, cv.CV_8UC1)
        valImg = cv.CreateMat(small_img.height, small_img.width, cv.CV_8UC1)
        cv.Split(imgHSV, hueImg, satImg, valImg, None)

        cv.ShowImage("hueImg", hueImg)

        hueTrshld = cv.CreateMat(hueImg.height, hueImg.width, cv.CV_8UC1)
        hueDiff = 30
        satDiff = 80
        for x in range(0, hueTrshld.height):
            for y in range(0, hueTrshld.width):
                hueTrshld[x,y] = 0
                if hueImg[x,y]>(H-hueDiff) and hueImg[x,y]>(1) and hueImg[x,y]<(H+hueDiff):
                    if satImg[x,y]>(S-satDiff) and satImg[x,y]<(S+satDiff):
                        hueTrshld[x,y] = 255

        hueTrshldErode = cv.CreateMat(hueImg.height, hueImg.width, cv.CV_8UC1)        
        hueTrshldDilate = cv.CreateMat(hueImg.height, hueImg.width, cv.CV_8UC1)        


        kernel10 = cv.CreateStructuringElementEx(10,10,0,0, cv.CV_SHAPE_RECT)
        kernel8 = cv.CreateStructuringElementEx(8,8,0,0, cv.CV_SHAPE_RECT)
        kernel6 = cv.CreateStructuringElementEx(6,6,0,0, cv.CV_SHAPE_RECT)
        kernel4 = cv.CreateStructuringElementEx(4,4,0,0, cv.CV_SHAPE_RECT)

        cv.Erode(hueTrshld, hueTrshldErode, kernel6, 1)
        cv.Dilate(hueTrshldErode, hueTrshldDilate, kernel10, 1)

        
        cv.ShowImage("hueTrshldOr", hueTrshld) #original
        cv.ShowImage("hueTrshldDi", hueTrshldDilate) #dilated
        cv.ShowImage("hueTrshldEr", hueTrshldErode)  #eroded
Code Example #27
def get_angel(src):
    pos = 6
    element = cv.CreateStructuringElementEx(pos * 2 + 1, pos * 2 + 1, pos, pos,
                                            element_shape)
    ni = cv.CreateImage(cv.GetSize(src), src.depth, src.nChannels)
    cv.Erode(src, ni, element, 1)

    image_gray = cv.CreateImage(cv.GetSize(ni), 8, 1)
    cv.CvtColor(ni, image_gray, cv.CV_RGB2GRAY)
    pi = Image.fromstring("L", cv.GetSize(ni), image_gray.tostring())
    first = 0 if pi.getpixel((0, 0)) < 240 else 255
    xstart = xend = ystart = yend = 0
    for x in xrange(1, pi.size[0]):
        v = 0 if pi.getpixel((x, 0)) < 240 else 255
        if first == 0 and v != 0:
            xstart = x
            xend = pi.size[0]
            break
        if first == 255 and v != 255:
            xstart = 0
            xend = x
            break

    if first == 255:
        for y in xrange(pi.size[1]):
            v = 0 if pi.getpixel((0, y)) < 240 else 255
            if v != 255:
                yend = y
                break
    else:
        for y in xrange(pi.size[1]):
            v = 0 if pi.getpixel((pi.size[0] - 1, y)) < 240 else 255
            if v != 255:
                yend = y
                break

    a = yend - ystart
    b = xend - xstart or 1
    alpha = atan(a * 1.0 / b) / (PI * 1.0 / 180)
    if first == 255:
        alpha = -alpha

    return (alpha, pi.size[0] * 1.0 / 2, pi.size[1] * 1.0 / 2)
Code Example #28
  def process_Image(self):
    """ here is where the image should be processed to get the bounding box """
    # check if we've created the supporting images yet
    if self.threshed_image == None:
      if self.image != None:
        self.create_all_images()

    # from the old method call def threshold_image(self):
    cv.Split(self.image, self.blue, self.green, self.red, None)
    cv.CvtColor(self.image, self.hsv, cv.CV_RGB2HSV)
    cv.Split(self.hsv, self.hue, self.sat, self.val, None)

    # replace each channel with its thresholded version
    cv.InRangeS(self.red, self.thresholds['low_red'],\
                self.thresholds['high_red'], self.red)
    cv.InRangeS(self.green, self.thresholds['low_green'],\
                self.thresholds['high_green'], self.green)
    cv.InRangeS(self.blue, self.thresholds['low_blue'],\
                self.thresholds['high_blue'], self.blue)
    cv.InRangeS(self.hue, self.thresholds['low_hue'],\
                self.thresholds['high_hue'], self.hue)
    cv.InRangeS(self.sat, self.thresholds['low_sat'],\
                self.thresholds['high_sat'], self.sat)
    cv.InRangeS(self.val, self.thresholds['low_val'],\
                self.thresholds['high_val'], self.val)

    # AND (multiply) all the thresholded images into one "output" image,
    # named self.copy
    cv.Mul(self.red, self.green, self.copy)
    cv.Mul(self.copy, self.blue, self.copy)
    cv.Mul(self.copy, self.hue, self.copy)
    cv.Mul(self.copy, self.sat, self.copy)
    cv.Mul(self.copy, self.val, self.copy)
    # erode and dilate shave off and add edge pixels respectively
    cv.Erode(self.copy, self.copy, iterations = 1)
    cv.Dilate(self.copy, self.copy, iterations = 1)

    # Make self.threshed_image be self.copy
    cv.Copy(self.copy,self.threshed_image)

    self.find_biggest_region()
Code Example #29
File: VisionProcessor.py  Project: sdp-2011/sdp-2
    def detect_ball(self, hsv_img, erd_mat, erd, dil_mat, dil):
        size = cv.GetSize(hsv_img)
        # colours on pitch2 (note conversion is from BGR2HSV not RGB2HSV!)
        trsh_im = self.red_color_.in_range_s(hsv_img)

        cv.Dilate(
            trsh_im, trsh_im,
            cv.CreateStructuringElementEx(dil_mat[0], dil_mat[1], 0, 0, 0),
            dil)
        cv.Erode(
            trsh_im, trsh_im,
            cv.CreateStructuringElementEx(erd_mat[0], erd_mat[1], 0, 0, 0),
            erd)

        tmp_im = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)
        cv.Copy(trsh_im, tmp_im)
        largest = find_largest_contour(
            cv.FindContours(tmp_im, cv.CreateMemStorage(0),
                            cv.CV_RETR_EXTERNAL, cv.CV_CHAIN_APPROX_NONE))
        if not largest: return trsh_im, None
        return trsh_im, Math.int_vec(get_contour_center(cv.Moments(largest)))
Code Example #30
def threshold_image(D):
    """ runs the image processing in order to create a 
        black and white thresholded image out of D.image
        into D.threshed_image
    """

    # Use OpenCV to split the image up into channels,
    # saving them in their respective bw images
    cv.Split(D.image, D.blue, D.green, D.red, None)

    # This line creates a hue-saturation-value image
    cv.CvtColor(D.image, D.hsv, cv.CV_RGB2HSV)
    cv.Split(D.hsv, D.hue, D.sat, D.val, None)

    # Here is how OpenCV thresholds the images based on the slider values:
    cv.InRangeS(D.red, D.thresholds["low_red"], \
                    D.thresholds["high_red"], D.red_threshed)
    cv.InRangeS(D.green, D.thresholds["low_green"], \
                    D.thresholds["high_green"], D.green_threshed)
    cv.InRangeS(D.blue, D.thresholds["low_blue"], \
                    D.thresholds["high_blue"], D.blue_threshed)
    cv.InRangeS(D.hue, D.thresholds["low_hue"], \
                    D.thresholds["high_hue"], D.hue_threshed)
    cv.InRangeS(D.sat, D.thresholds["low_sat"], \
                    D.thresholds["high_sat"], D.sat_threshed)
    cv.InRangeS(D.val, D.thresholds["low_val"], \
                    D.thresholds["high_val"], D.val_threshed)

    # Multiply all the thresholded images into one "output" image,
    # named D.threshed_image
    cv.Mul(D.red_threshed, D.green_threshed, D.threshed_image)
    cv.Mul(D.threshed_image, D.blue_threshed, D.threshed_image)
    cv.Mul(D.threshed_image, D.hue_threshed, D.threshed_image)
    cv.Mul(D.threshed_image, D.sat_threshed, D.threshed_image)
    cv.Mul(D.threshed_image, D.val_threshed, D.threshed_image)

    # Erode and Dilate shave off and add edge pixels respectively
    cv.Erode(D.threshed_image, D.threshed_image, iterations=1)
    cv.Dilate(D.threshed_image, D.threshed_image, iterations=1)
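
The chain of cv.Mul calls acts as a logical AND because every input is a 0/255 mask: anything multiplied by 0 stays 0, and 255*255 saturates to 255 for 8-bit images (cv.And would give the same result). A minimal standalone check, independent of the code above:

import cv2.cv as cv  # legacy OpenCV binding assumed by these examples

a = cv.CreateImage((4, 4), 8, 1)
b = cv.CreateImage((4, 4), 8, 1)
out = cv.CreateImage((4, 4), 8, 1)
cv.Set(a, 255)
cv.Set(b, 255)
cv.Mul(a, b, out)            # each pixel: 255 * 255, saturated to 255
print cv.CountNonZero(out)   # prints 16
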