Example 1
    def find_blobs(self, frame, debug_image):
        '''Find blobs in an image.

        Hopefully this gets blobs that correspond with
        buoys, but any intelligent checking is done outside of this function.

        '''

        # Get Channels
        hsv = cv.CreateImage(cv.GetSize(frame), 8, 3)
        cv.CvtColor(frame, hsv, cv.CV_BGR2HSV)
        saturation = libvision.misc.get_channel(hsv, 1)
        red = libvision.misc.get_channel(frame, 2)

        # Adaptive Threshold
        cv.AdaptiveThreshold(
            saturation,
            saturation,
            255,
            cv.CV_ADAPTIVE_THRESH_MEAN_C,
            cv.CV_THRESH_BINARY_INV,
            self.saturation_adaptive_thresh_blocksize -
            self.saturation_adaptive_thresh_blocksize % 2 + 1,
            self.saturation_adaptive_thresh,
        )
        cv.AdaptiveThreshold(
            red,
            red,
            255,
            cv.CV_ADAPTIVE_THRESH_MEAN_C,
            cv.CV_THRESH_BINARY,
            self.red_adaptive_thresh_blocksize -
            self.red_adaptive_thresh_blocksize % 2 + 1,
            -1 * self.red_adaptive_thresh,
        )

        kernel = cv.CreateStructuringElementEx(9, 9, 4, 4, cv.CV_SHAPE_ELLIPSE)
        cv.Erode(saturation, saturation, kernel, 1)
        cv.Dilate(saturation, saturation, kernel, 1)
        cv.Erode(red, red, kernel, 1)
        cv.Dilate(red, red, kernel, 1)

        buoys_filter = cv.CreateImage(cv.GetSize(frame), 8, 1)
        cv.And(saturation, red, buoys_filter)

        if debug_image:
            svr.debug("Saturation", saturation)
            svr.debug("Red", red)
            svr.debug("AdaptiveThreshold", buoys_filter)

        # Get blobs
        labeled_image = cv.CreateImage(cv.GetSize(buoys_filter), 8, 1)
        blobs = libvision.blob.find_blobs(buoys_filter, labeled_image,
                                          MIN_BLOB_SIZE, 10)

        return blobs, labeled_image
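
For reference, a minimal sketch of the same pipeline with the modern cv2 API (adaptive threshold on the saturation and red channels, a morphological open, then an AND). It assumes frame is a BGR NumPy array; the function name, block size, and offsets are placeholders standing in for the instance attributes used above.

import cv2

def find_buoy_mask(frame, blocksize=31, sat_offset=5, red_offset=5):
    # Saturation channel from HSV and the red channel from BGR
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    saturation = cv2.split(hsv)[1]
    red = cv2.split(frame)[2]

    # Adaptive thresholds; the block size must be odd
    blocksize = blocksize - blocksize % 2 + 1
    sat_mask = cv2.adaptiveThreshold(saturation, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                     cv2.THRESH_BINARY_INV, blocksize, sat_offset)
    red_mask = cv2.adaptiveThreshold(red, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                     cv2.THRESH_BINARY, blocksize, -red_offset)

    # Morphological open (erode then dilate) with an elliptical kernel
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (9, 9))
    sat_mask = cv2.morphologyEx(sat_mask, cv2.MORPH_OPEN, kernel)
    red_mask = cv2.morphologyEx(red_mask, cv2.MORPH_OPEN, kernel)

    return cv2.bitwise_and(sat_mask, red_mask)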
Example 2
    def updateAccumulatorImage(self):

        if "ImpactMotion" in self.images.keys() \
            and self.images[ "ImpactMotion" ] != None:

            # The accumulator starts with the impact image
            accumulatorArray = np.copy(self.images["ImpactMotion"]).astype(
                np.int32)

            # Take maximum values from motion images after the impact but
            # don't add them in to de-emphasise the manipulator
            for imageName in self.config.imageData.keys():

                image = self.images[imageName]
                imageData = self.config.imageData[imageName]

                if image is not None \
                    and imageData.active \
                    and imageName.startswith("PostMotion"):

                    transformedImage = scipy.ndimage.interpolation.shift(
                        image,
                        (imageData.displacementY, imageData.displacementX))
                    accumulatorArray = np.maximum(accumulatorArray,
                                                  transformedImage)

            # Dilate and subtract motion images from before the impact
            for imageName in self.config.imageData.keys():

                image = self.images[imageName]
                imageData = self.config.imageData[imageName]

                if image is not None \
                    and imageData.active \
                    and imageName.startswith("PreMotion"):

                    transformedImage = scipy.ndimage.interpolation.shift(
                        image,
                        (imageData.displacementY, imageData.displacementX))
                    cv.Dilate(transformedImage, transformedImage)
                    cv.Dilate(transformedImage, transformedImage)
                    cv.Dilate(transformedImage, transformedImage)
                    accumulatorArray = accumulatorArray - transformedImage

            self.accumulatorImage = np.clip(accumulatorArray, 0,
                                            255).astype(np.uint8)
            self.dwgAccumulatorImageDisplay.setImageFromNumpyArray(
                self.accumulatorImage)

        else:
            self.accumulatorImage = None
            self.dwgAccumulatorImageDisplay.clear()

        self.updateMaskImage()
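
A rough NumPy/cv2 sketch of the same accumulator idea (per-pixel maximum over shifted post-impact motion images, minus dilated pre-impact images) is shown below; the dictionaries and displacement tuples are hypothetical stand-ins for the config structures used above.

import cv2
import numpy as np
from scipy.ndimage import shift

def build_accumulator(impact, post_images, pre_images, displacements):
    # Start from the impact-motion image
    acc = impact.astype(np.int32)

    # Take per-pixel maxima from the shifted post-impact motion images
    for name, img in post_images.items():
        dy, dx = displacements[name]
        acc = np.maximum(acc, shift(img, (dy, dx)))

    # Dilate (three iterations, as above) and subtract the pre-impact images
    kernel = np.ones((3, 3), np.uint8)
    for name, img in pre_images.items():
        dy, dx = displacements[name]
        moved = shift(img, (dy, dx))
        acc -= cv2.dilate(moved, kernel, iterations=3).astype(np.int32)

    return np.clip(acc, 0, 255).astype(np.uint8)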
Example 3
def find_lines(frame):
    # Resize to 640x480
    frame_small = cv.CreateMat(480, 640, cv.CV_8UC3)
    cv.Resize(frame, frame_small)

    # Threshold by distance: blank out all top pixels
    cv.Rectangle(frame_small, (0, 0), (640, 80), (0, 0, 0, 0), cv.CV_FILLED)

    frame_size = cv.GetSize(frame_small)
    frame_gray = cv.CreateImage(frame_size, cv.IPL_DEPTH_8U, 1)
    edges = cv.CreateImage(frame_size, cv.IPL_DEPTH_8U, 1)
    cv.CvtColor(frame_small, frame_gray, cv.CV_BGR2GRAY)

    cv.Canny(frame_gray, edges, 400, 400)
    cv.Dilate(edges, edges,
              cv.CreateStructuringElementEx(3, 3, 0, 0, cv.CV_SHAPE_RECT))
    cv.Erode(edges, edges,
             cv.CreateStructuringElementEx(1, 1, 0, 0, cv.CV_SHAPE_RECT))

    line_storage = cv.CreateMemStorage()
    lines = cv.HoughLines2(edges, line_storage, cv.CV_HOUGH_PROBABILISTIC, 1,
                           cv.CV_PI / 180.0, 300, 100, 40)
    print len(lines), 'lines found'
    for i in range(len(lines)):
        line = lines[i]
        cv.Line(frame_small, line[0], line[1],
                hv2rgb(360.0 * i / len(lines), 1.0), 3, 8)

    cv.ShowImage('frame', frame_small)
    cv.ShowImage('edges', edges)
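
The same Canny/dilate/Hough chain translates fairly directly to cv2; a minimal sketch, assuming frame is a BGR NumPy array and drawing every line in one colour instead of calling hv2rgb:

import cv2
import numpy as np

def find_lines_cv2(frame):
    small = cv2.resize(frame, (640, 480))
    small[:80, :] = 0  # blank out the top rows ("threshold by distance")
    gray = cv2.cvtColor(small, cv2.COLOR_BGR2GRAY)
    edges = cv2.Canny(gray, 400, 400)
    edges = cv2.dilate(edges, np.ones((3, 3), np.uint8))
    lines = cv2.HoughLinesP(edges, 1, np.pi / 180.0,
                            threshold=300, minLineLength=100, maxLineGap=40)
    if lines is not None:
        for x1, y1, x2, y2 in lines[:, 0]:
            cv2.line(small, (x1, y1), (x2, y2), (0, 255, 0), 3)
    return small, edges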
Example 4
def threshold_green(image):
    #cv.InRange(blurred_image,GREEN_MIN,GREEN_MAX,green_adaptive)
    cv.AdaptiveThreshold(image, green_adaptive, 255,
                         cv.CV_ADAPTIVE_THRESH_MEAN_C, cv.CV_THRESH_BINARY_INV,
                         101, 40)  #25
    cv.Erode(green_adaptive, green_eroded_image, None, 5)  #9
    cv.Dilate(green_eroded_image, green_dilated_image, None, 6)  #27
Example 5
def threshold_red(image):
    #bright cv.AdaptiveThreshold(image,red_adaptive,255,cv.CV_ADAPTIVE_THRESH_MEAN_C,cv.CV_THRESH_BINARY,17,-30)
    cv.AdaptiveThreshold(image, red_adaptive, 255,
                         cv.CV_ADAPTIVE_THRESH_MEAN_C, cv.CV_THRESH_BINARY, 17,
                         -14)
    cv.Erode(red_adaptive, red_eroded_image, None, 1)
    cv.Dilate(red_eroded_image, red_dilated_image, None, 5)
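
Examples 4 and 5 share the same adaptive-threshold-then-morphology pattern; a small hedged cv2 helper that captures it (destination buffers replaced by return values, parameter names hypothetical):

import cv2

def adaptive_mask(gray, block_size, offset, invert, erode_iters, dilate_iters):
    # block_size must be odd for cv2.adaptiveThreshold
    thresh_type = cv2.THRESH_BINARY_INV if invert else cv2.THRESH_BINARY
    mask = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                 thresh_type, block_size, offset)
    mask = cv2.erode(mask, None, iterations=erode_iters)
    return cv2.dilate(mask, None, iterations=dilate_iters)

# e.g. the "green" settings above: green = adaptive_mask(gray, 101, 40, True, 5, 6)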
Example 6
    def findRectPoints(self, oldRectPoints):
        hueRange = self.hueRange
        clone = cv.CloneImage(self.frame)
        hsv = cv.CloneImage(self.channels3)
        threshold = cv.CloneImage(self.channels1)
        threshold2 = cv.CloneImage(self.channels1)

        cv.CvtColor(clone, hsv, cv.CV_RGB2HSV)

        cv.InRangeS(hsv, (165, 100, 100), (180, 255, 255), threshold)
        cv.InRangeS(hsv, (0, 100, 100), (15, 255, 255), threshold2)
        cv.Add(threshold, threshold2, threshold)
        self.hue += 1
        print self.hue
        cv.Erode(threshold, threshold, iterations=5)
        cv.Dilate(threshold, threshold, iterations=5)

        cv.ShowImage(self.color, threshold)

        memory = cv.CreateMemStorage(0)
        clone2 = cv.CloneImage(threshold)
        contours = cv.FindContours(clone2, memory, cv.CV_RETR_LIST,
                                   cv.CV_CHAIN_APPROX_SIMPLE, (0, 0))
        if not contours:
            rectPoints = oldRectPoints
        else:
            rectPoints = cv.BoundingRect(contours)
        return rectPoints
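
Because red straddles the hue wrap-around, the two InRangeS calls above are combined; a minimal cv2 sketch of the same red mask, assuming a BGR input frame:

import cv2
import numpy as np

def red_mask(frame_bgr):
    hsv = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2HSV)
    low = cv2.inRange(hsv, np.array([0, 100, 100]), np.array([15, 255, 255]))
    high = cv2.inRange(hsv, np.array([165, 100, 100]), np.array([180, 255, 255]))
    mask = cv2.bitwise_or(low, high)
    mask = cv2.erode(mask, None, iterations=5)
    return cv2.dilate(mask, None, iterations=5)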
Example 7
    def findRectPoints(self, oldRectPoints):
        hueRange = self.hueRange
        satRange = self.satRange
        valRange = self.valRange
        clone = cv.CloneImage(self.frame)
        hsv = cv.CloneImage(self.channels3)
        threshold = cv.CloneImage(self.channels1)
        threshold2 = cv.CloneImage(self.channels1)

        cv.Smooth(clone, clone, cv.CV_GAUSSIAN, 7, 7)

        cv.CvtColor(clone, hsv, cv.CV_BGR2HSV)
        cv.InRangeS(hsv, (hueRange[0], satRange[0], valRange[0]),
                    (hueRange[1], satRange[1], valRange[1]), threshold)
        cv.InRangeS(hsv, (hueRange[2], satRange[0], valRange[0]),
                    (hueRange[3], satRange[1], valRange[1]), threshold2)
        cv.Add(threshold, threshold2, threshold)
        cv.Erode(threshold, threshold, iterations=5)
        cv.Dilate(threshold, threshold, iterations=5)

        #       cv.ShowImage(self.color, threshold)

        memory = cv.CreateMemStorage(0)
        clone2 = cv.CloneImage(threshold)
        contours = cv.FindContours(clone2, memory, cv.CV_RETR_LIST,
                                   cv.CV_CHAIN_APPROX_SIMPLE, (0, 0))
        if not contours:
            rectPoints = oldRectPoints
        else:
            rectPoints = cv.BoundingRect(list(contours))
        return rectPoints
Example 8
def findPoints(frame, oldRectPoints):
    imageSize = cv.GetSize(frame)
    original = cv.CloneImage(frame)
    hsv = cv.CreateImage(imageSize, 8, 3)
    threshold = cv.CreateImage(imageSize, 8, 1)

    # Do things to the image to isolate the red parts

    cv.CvtColor(original, hsv, cv.CV_RGB2HSV)

    cv.InRangeS(hsv, (110, 80, 80), (140, 255, 255), threshold)
    cv.Erode(threshold, threshold, iterations=5)
    cv.Dilate(threshold, threshold, iterations=5)
    cv.ShowImage("shit", threshold)

    memory = cv.CreateMemStorage(0)
    clone = cv.CloneImage(threshold)
    contours = cv.FindContours(clone, memory, cv.CV_RETR_LIST,
                               cv.CV_CHAIN_APPROX_SIMPLE, (0, 0))

    #   area = cv.ContourArea(contours)
    if not contours:
        # If there's no red on the screen
        rectPoints = oldRectPoints
    else:
        rectPoints = cv.BoundingRect(contours)
    # print rectPoints

    return rectPoints
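
Examples 7 and 8 end with the same contours-to-bounding-rect step; a hedged cv2 sketch of that step (assuming the OpenCV 4.x two-value findContours return, and taking the largest contour rather than the first, which the snippets above do not do):

import cv2

def bounding_rect_of_mask(mask, fallback):
    contours, _ = cv2.findContours(mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    if not contours:
        return fallback  # nothing matched the colour range
    largest = max(contours, key=cv2.contourArea)
    return cv2.boundingRect(largest)  # (x, y, w, h)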
Example 9
def get_mask_with_contour(img,
                          ret_img=False,
                          ret_cont=False,
                          with_init_mask=False,
                          cont_color=cv.RGB(255, 50, 50),
                          normalize=True,
                          skin_version=1,
                          strong=False):
    if normalize:
        img = normalize_rgb(img, aggressive=0.005)
    mask = skin_mask(img) if skin_version == 1 else skin_mask2(img)

    di_mask = image_empty_clone(mask)
    cv.Dilate(mask, di_mask)

    seqs = cv.FindContours(cv.CloneImage(di_mask), memory(),
                           cv.CV_RETR_EXTERNAL)

    c_img = image_empty_clone(mask)
    cv.DrawContours(c_img, seqs, 255, 255, 10, -1)

    er_img = image_empty_clone(c_img)
    cv.Erode(c_img, er_img, iterations=2)

    seqs = cv.FindContours(cv.CloneImage(er_img), memory(),
                           cv.CV_RETR_EXTERNAL)
    if not seqs:
        print "no areas"
        return img, None
    seqs = cv.ApproxPoly(seqs,
                         memory(),
                         cv.CV_POLY_APPROX_DP,
                         parameter=3,
                         parameter2=1)

    result = []
    if ret_img:
        #        er_seq_img = cv.CreateImage(sizeOf(er_img), 8, 3)
        #        cv.Zero(er_seq_img)
        er_seq_img = cv.CloneImage(img)
        if with_init_mask:
            cv.Merge(mask, mask, mask, None, er_seq_img)

        if strong:
            cv.DrawContours(er_seq_img, seqs, cont_color, 0, 10, thickness=3)
            cv.DrawContours(er_seq_img,
                            seqs,
                            cv.RGB(0, 0, 0),
                            0,
                            10,
                            thickness=1)
        else:
            cv.DrawContours(er_seq_img, seqs, cont_color, 0, 10, thickness=1)
        result.append(er_seq_img)

    if ret_cont:
        result.append(seqs)

    return result
Example 10
def threshhold(img):
    bwdst = cv.CreateImage(cv.GetSize(img), cv.IPL_DEPTH_8U, 1)
    cv.CvtColor(img, bwdst, cv.CV_BGR2GRAY)
    cv.AdaptiveThreshold(bwdst, bwdst, 255.0, cv.CV_ADAPTIVE_THRESH_MEAN_C,
                         cv.CV_THRESH_BINARY, 11)
    cv.Dilate(bwdst, bwdst)
    cv.Erode(bwdst, bwdst)
    return bwdst
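
A cv2 counterpart of this grayscale adaptive threshold followed by dilate-then-erode (a morphological close); the C constant of 5 mirrors the old API's default param1 and is an assumption:

import cv2
import numpy as np

def threshhold_cv2(img_bgr):
    gray = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY)
    bw = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                               cv2.THRESH_BINARY, 11, 5)
    # Dilate followed by erode is a morphological close
    kernel = np.ones((3, 3), np.uint8)
    return cv2.morphologyEx(bw, cv2.MORPH_CLOSE, kernel)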
Example 11
    def create_image(self):

        #Find the size of the image

        #Create images for each channel
        blue = cv.CreateImage(self.size, 8, 1)
        red = cv.CreateImage(self.size, 8, 1)
        green = cv.CreateImage(self.size, 8, 1)

        hue = cv.CreateImage(self.size, 8, 1)
        sat = cv.CreateImage(self.size, 8, 1)
        val = cv.CreateImage(self.size, 8, 1)

        #Create an image to be returned and eventually displayed
        thresholds = cv.CreateImage(self.size, 8, 1)

        #Create images to save the thresholded images to
        red_threshed = cv.CreateImage(self.size, 8, 1)
        green_threshed = cv.CreateImage(self.size, 8, 1)
        blue_threshed = cv.CreateImage(self.size, 8, 1)

        hue_threshed = cv.CreateImage(self.size, 8, 1)
        sat_threshed = cv.CreateImage(self.size, 8, 1)
        val_threshed = cv.CreateImage(self.size, 8, 1)

        #Split the image up into channels, saving them in their respective image
        cv.Split(self.image, blue, green, red, None)
        cv.CvtColor(self.image, self.hsv, cv.CV_RGB2HSV)
        cv.Split(self.hsv, hue, sat, val, None)

        #Threshold the images based on the slider values
        cv.InRangeS(red, self.thresholds['low_red'],\
                    self.thresholds['high_red'], red_threshed)
        cv.InRangeS(green, self.thresholds['low_green'],\
                    self.thresholds['high_green'], green_threshed)
        cv.InRangeS(blue, self.thresholds['low_blue'],\
                    self.thresholds['high_blue'], blue_threshed)

        cv.InRangeS(hue, self.thresholds['low_hue'],\
                    self.thresholds['high_hue'], hue_threshed)
        cv.InRangeS(sat, self.thresholds['low_sat'],\
                    self.thresholds['high_sat'], sat_threshed)
        cv.InRangeS(val, self.thresholds['low_val'],\
                    self.thresholds['high_val'], val_threshed)

        #Recombine all of the thresholded images into one image
        cv.Mul(red_threshed, green_threshed, thresholds)
        cv.Mul(thresholds, blue_threshed, thresholds)
        cv.Mul(thresholds, hue_threshed, thresholds)
        cv.Mul(thresholds, sat_threshed, thresholds)
        cv.Mul(thresholds, val_threshed, thresholds)

        #Erode and Dilate shave off and add edge pixels respectively
        cv.Erode(thresholds, thresholds, iterations=1)
        cv.Dilate(thresholds, thresholds, iterations=1)

        return thresholds
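
Since each InRangeS result is a 0/255 mask, the chain of cv.Mul calls acts as a per-pixel AND; a hedged cv2 sketch of that combination step, assuming the six channel masks have already been computed:

import cv2
from functools import reduce

def combine_masks(*masks):
    # Per-pixel AND of any number of 0/255 binary masks
    combined = reduce(cv2.bitwise_and, masks)
    combined = cv2.erode(combined, None, iterations=1)
    return cv2.dilate(combined, None, iterations=1)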
Example 12
 def subtract(self, thres_chan):
     cv.RunningAvg(thres_chan, self.accumulator, self.adaptation_rate)
     cv.CvtScale(thres_chan, self.green32_img)
     cv.Sub(self.green32_img, self.accumulator, self.difference_img)
     cv.Threshold(self.difference_img, self.thresholded_img, self.threshold,
                  1, cv.CV_THRESH_BINARY)
     cv.Dilate(self.thresholded_img, self.thresholded_img, iterations=1)
     blob.remove_large_blobs(self.thresholded_img, max_area=self.max_area)
     return self.thresholded_img
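
A sketch of the same running-average background subtraction with cv2 and NumPy; class and parameter names are placeholders, and the large-blob removal done by blob.remove_large_blobs is omitted:

import cv2
import numpy as np

class BackgroundSubtractor(object):
    def __init__(self, shape, adaptation_rate=0.05, threshold=20):
        self.acc = np.zeros(shape, np.float32)
        self.adaptation_rate = adaptation_rate
        self.threshold = threshold

    def subtract(self, gray):
        # Running average of the incoming frames
        cv2.accumulateWeighted(gray.astype(np.float32), self.acc,
                               self.adaptation_rate)
        diff = gray.astype(np.float32) - self.acc
        _, mask = cv2.threshold(diff, self.threshold, 255, cv2.THRESH_BINARY)
        return cv2.dilate(mask.astype(np.uint8), None, iterations=1)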
Example 13
def channel_processing(channel):
    cv.AdaptiveThreshold(channel,
                         channel,
                         255,
                         adaptive_method=cv.CV_ADAPTIVE_THRESH_MEAN_C,
                         thresholdType=cv.CV_THRESH_BINARY,
                         blockSize=55,
                         param1=7)
    #mop up the dirt
    cv.Dilate(channel, channel, None, 1)
    cv.Erode(channel, channel, None, 1)
Example 14
    def fuzz_image(self, image):

        cv.Smooth(image,
                  self.fuzz,
                  smoothtype=cv.CV_GAUSSIAN,
                  param1=23,
                  param2=23)
        # cv.Threshold(self.fuzz, self.fuzz, self.threshold_value, 255, cv.CV_THRESH_BINARY)
        cv.Erode(self.fuzz, self.fuzz, None, self.erode_value)
        cv.Dilate(self.fuzz, self.fuzz, None, self.dialate_value)

        return self.fuzz
Example 15
def threshold(image,
              hsvImg,
              threshImg,
              lowerHSV,
              upperHSV,
              erode_and_dilate=True):
    """ 
    Thresholds an image for a certain range of hsv values 
    """
    cv.Smooth(image, image, cv.CV_GAUSSIAN, 3, 0)
    cv.CvtColor(image, hsvImg, cv.CV_BGR2HSV)
    cv.InRangeS(hsvImg, lowerHSV, upperHSV, threshImg)
    if erode_and_dilate:
        cv.Erode(threshImg, threshImg, None, 2)
        cv.Dilate(threshImg, threshImg, None, 2)
        #cv.Erode(threshImg, threshImg, None, 2)
        #cv.Dilate(threshImg, threshImg, None, 1)
        #cv.Erode(threshImg, threshImg, None, 1)
        cv.Dilate(threshImg, threshImg, None, 1)
        cv.Erode(threshImg, threshImg, None, 1)
    return threshImg
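
The same helper is easy to express with cv2, where the scratch images are allocated implicitly; a minimal sketch assuming a BGR input and HSV bounds given as 3-tuples:

import cv2
import numpy as np

def threshold_cv2(image_bgr, lower_hsv, upper_hsv, erode_and_dilate=True):
    """Threshold a BGR image for a range of HSV values."""
    blurred = cv2.GaussianBlur(image_bgr, (3, 3), 0)
    hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, np.array(lower_hsv), np.array(upper_hsv))
    if erode_and_dilate:
        mask = cv2.erode(mask, None, iterations=2)
        mask = cv2.dilate(mask, None, iterations=2)
    return mask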
Example 16
    def threshold(self, thres_low, thres_high, thres_chan):
        result_val = 1  #Change result_val to 255 if need to view image

        cv.Threshold(thres_chan, self.thresholded_low, thres_low, result_val,
                     cv.CV_THRESH_BINARY)
        cv.Dilate(
            self.thresholded_low, self.thresholded_low
        )  #thresholded_low thresholded image using threshold for dark regions
        blob.remove_large_blobs(self.thresholded_low, self.max_area)

        cv.Threshold(thres_chan, self.thresholded_high, thres_high, result_val,
                     cv.CV_THRESH_BINARY)
        cv.Dilate(
            self.thresholded_high, self.thresholded_high
        )  #thresholded_high thresholded image using threshold for bright regions
        blob.remove_large_blobs(self.thresholded_high,
                                self.max_area)  #, show=True)

        cv.Or(self.thresholded_low, self.thresholded_high,
              self.thresholded_combined)
        return self.thresholded_combined
Example 17
def precornerdetect(image):
    # assume that the image is floating-point
    corners = cv.CloneMat(image)
    cv.PreCornerDetect(image, corners, 3)

    dilated_corners = cv.CloneMat(image)
    cv.Dilate(corners, dilated_corners, None, 1)

    corner_mask = cv.CreateMat(image.rows, image.cols, cv.CV_8UC1)
    cv.Sub(corners, dilated_corners, corners)
    cv.CmpS(corners, 0, corner_mask, cv.CV_CMP_GE)
    return (corners, corner_mask)
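
The same corner-mask trick (a corner is a local maximum of the pre-corner response, so compare against a dilated copy) in cv2, assuming a single-channel float32 input:

import cv2

def precornerdetect_cv2(image):
    corners = cv2.preCornerDetect(image, 3)
    dilated = cv2.dilate(corners, None)
    # corners - dilated >= 0  <=>  corners >= dilated
    corner_mask = cv2.compare(corners, dilated, cv2.CMP_GE)
    return corners, corner_mask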
Example 18
def find_squares4(color_img):
    """
    Finds multiple squares in image

    Steps:
    -Use Canny edge to highlight contours, and dilation to connect
    the edge segments.
    -Threshold the result to binary edge tokens
    -Use cv.FindContours: returns a cv.CvSequence of cv.CvContours
    -Filter each candidate: use Approx poly, keep only contours with 4 vertices, 
    enough area, and ~90deg angles.

    Return all squares contours in one flat list of arrays, 4 x,y points each.
    """
    #select even sizes only
    width, height = (color_img.width & -2, color_img.height & -2)
    timg = cv.CloneImage(color_img)  # make a copy of input image
    gray = cv.CreateImage((width, height), 8, 1)

    # select the maximum ROI in the image
    cv.SetImageROI(timg, (0, 0, width, height))

    # down-scale and upscale the image to filter out the noise
    pyr = cv.CreateImage((width / 2, height / 2), 8, 3)
    cv.PyrDown(timg, pyr, 7)
    cv.PyrUp(pyr, timg, 7)

    tgray = cv.CreateImage((width, height), 8, 1)
    squares = []

    # Find squares in every color plane of the image
    # Two methods, we use both:
    # 1. Canny to catch squares with gradient shading. Use upper threshold
    # from slider, set the lower to 0 (which forces edges merging). Then
    # dilate canny output to remove potential holes between edge segments.
    # 2. Binary thresholding at multiple levels
    N = 11
    for c in [0, 1, 2]:
        #extract the c-th color plane
        cv.SetImageCOI(timg, c + 1)
        cv.Copy(timg, tgray, None)
        cv.Canny(tgray, gray, 0, 50, 5)
        cv.Dilate(gray, gray)
        squares = squares + find_squares_from_binary(gray)

        # Look for more squares at several threshold levels
        for l in range(1, N):
            cv.Threshold(tgray, gray, (l + 1) * 255 / N, 255,
                         cv.CV_THRESH_BINARY)
            squares = squares + find_squares_from_binary(gray)

    return squares
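
find_squares_from_binary is not shown in this snippet; a hypothetical cv2-based version consistent with the docstring (approximate each contour, keep convex quadrilaterals with enough area; the ~90-degree angle test is left out) could look like this:

import cv2

def find_squares_from_binary(binary, min_area=1000):
    squares = []
    contours, _ = cv2.findContours(binary, cv2.RETR_LIST,
                                   cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        approx = cv2.approxPolyDP(cnt, 0.02 * cv2.arcLength(cnt, True), True)
        if (len(approx) == 4 and cv2.contourArea(approx) > min_area
                and cv2.isContourConvex(approx)):
            squares.append(approx.reshape(-1, 2))
    return squares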
Example 19
    def getDiff(self, frame):
        diffImg = cv.CloneImage(self.referencedImage)
        cv.Smooth(frame, frame, cv.CV_GAUSSIAN, 9, 0)
        cv.AbsDiff(self.referencedImage, frame, diffImg)

        greyImg = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)

        cv.CvtColor(diffImg, greyImg, cv.CV_RGB2GRAY)
        cv.Threshold(greyImg, greyImg, 30, 255, cv.CV_THRESH_BINARY)
        cv.Dilate(greyImg, greyImg, None, 9)
        cv.Erode(greyImg, greyImg, None, 5)

        return greyImg
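
The same reference-frame differencing in cv2, with the blur, threshold, and morphology settings copied from the example; names are placeholders:

import cv2

def get_diff_cv2(reference_bgr, frame_bgr, thresh=30):
    frame = cv2.GaussianBlur(frame_bgr, (9, 9), 0)
    diff = cv2.absdiff(reference_bgr, frame)
    grey = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
    _, mask = cv2.threshold(grey, thresh, 255, cv2.THRESH_BINARY)
    mask = cv2.dilate(mask, None, iterations=9)
    return cv2.erode(mask, None, iterations=5)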
Example 20
def printBlobs(img, min_color, max_color):
    blobs = cvblob.Blobs()  #Starts the blob class
    size = cv.GetSize(img)  #gets the size of the img

    hsv = cv.CreateImage(
        size, cv.IPL_DEPTH_8U,
        3)  #creates a new image for when the colored image is converted to hsv
    thresh = cv.CreateImage(
        size, cv.IPL_DEPTH_8U,
        1)  #creates a new image that is 1 channeled for the thresholding
    labelImg = cv.CreateImage(
        size, cvblob.IPL_DEPTH_LABEL,
        1)  #creates a image for later use for showing blobs

    cv.CvtColor(img, hsv,
                cv.CV_BGR2HSV)  #converts the color image to an hsv image
    cv.InRangeS(
        hsv, min_color, max_color, thresh
    )  #finds colors between a min range and a max range with a source img and a destination image

    #these are for corrections to thresholding to remove as many false positives
    cv.Smooth(thresh, thresh, cv.CV_BLUR)  #smooths out the thresholded image
    cv.Dilate(thresh, thresh)  #Dilates the thresholded image
    cv.Erode(thresh, thresh)  #Erodes the thresholded image

    result = cvblob.Label(
        thresh, labelImg, blobs
    )  #labels the connected components; returns the total number of labelled pixels

    numblobs = len(
        blobs.keys()
    )  #Number of blobs found. blobs is a dictionary with the keys as the number of the blob found and the value as the pointer to the location of the blob

    avgSize = int(result /
                  numblobs)  #Average size of the blobs casted as an int

    print "average size: " + str(avgSize)
    arr = []  #empty array to keep track of which blobs to remove
    for x in blobs:
        if blobs[x].area < avgSize:  #if the blob is smaller than the average size, mark it for removal
            arr.append(x)
    for x in arr:
        del blobs[x]  # the blob is then removed from the dictionary of blobs
    for x in blobs:
        print str(blobs[x]) + "," + str(
            blobs[x].area)  #prints the blob number and the area of the blob
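
Without the cvblob bindings, the label-then-filter-by-average-area step can be done with connected components; a hedged sketch that returns (label, area) pairs for the blobs at least as large as the mean:

import cv2

def blobs_above_average(mask):
    num, labels, stats, _ = cv2.connectedComponentsWithStats(mask, connectivity=8)
    areas = stats[1:, cv2.CC_STAT_AREA]  # skip the background label 0
    if len(areas) == 0:
        return []
    avg = areas.mean()
    return [(i + 1, int(a)) for i, a in enumerate(areas) if a >= avg]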
Example 21
def detecta(imagem):

    cv.Smooth(imagem, imagem, cv.CV_GAUSSIAN, 3)
    maiorArea = 0
    listaContornos = []
    listaVertices = []

    cv.AbsDiff(imagem, fundo, mascara)
    cv.CvtColor(mascara, cinza, cv.CV_BGR2GRAY)
    cv.Threshold(cinza, cinza, 50, 255, cv.CV_THRESH_BINARY)

    cv.Dilate(cinza, cinza, None, 18)
    cv.Erode(cinza, cinza, None, 18)

    armazenamento = cv.CreateMemStorage(0)
    contorno = cv.FindContours(cinza, armazenamento, cv.CV_RETR_LIST,
                               cv.CV_LINK_RUNS)

    while contorno:
        vertices_do_retangulo = cv.BoundingRect(list(contorno))
        listaVertices.append(vertices_do_retangulo)

        listaContornos.append(cv.ContourArea(contorno))
        maiorArea = max(listaContornos)
        maiorArea_index = listaContornos.index(maiorArea)
        retangulo_de_interesse = listaVertices[maiorArea_index]

        contorno = contorno.h_next()

        ponto1 = (retangulo_de_interesse[0], retangulo_de_interesse[1])
        ponto2 = (retangulo_de_interesse[0] + retangulo_de_interesse[2],
                  retangulo_de_interesse[1] + retangulo_de_interesse[3])
        cv.Rectangle(imagem, ponto1, ponto2, cv.CV_RGB(0, 0, 0), 2)
        cv.Rectangle(cinza, ponto1, ponto2, cv.CV_RGB(255, 255, 255), 1)
        largura = ponto2[0] - ponto1[0]
        altura = ponto2[1] - ponto1[1]
        cv.Line(cinza, (ponto1[0] + largura / 2, ponto1[1]),
                (ponto1[0] + largura / 2, ponto2[1]), cv.CV_RGB(255, 255,
                                                                255), 1)
        cv.Line(cinza, (ponto1[0], ponto1[1] + altura / 2),
                (ponto2[0], ponto1[1] + altura / 2), cv.CV_RGB(255, 255,
                                                               255), 1)
        global x
        x = ((640 / 2 - (ponto1[0] + (largura / 2))) * -1) / 5

    cv.ShowImage("Webcam", imagem)
    cv.ShowImage("Mascara", mascara)
    cv.ShowImage("Cinza", cinza)
Example 22
def get_canny_img(img):
    size = sizeOf(img)
    plate = cv.CreateImage(size, 8, 1)
    cv.Set(plate, 255)
    for k in (50, 100, 150, 200, 250):
        #    k = 100
        edges = cv.CreateImage(size, 8, 1)
        cv.Canny(img, edges, k - 20, k)
        #        show_image(edges)
        if k >= 100:
            cv.Dilate(edges, edges)
        else:
            k += 50
        #        cv.Erode(edges, edges)
        cv.Set(plate, 255 - k, edges)
    return plate
Example 23
def image_processor():
    cv.Smooth(gray_image, gray_image, cv.CV_GAUSSIAN, 3,
              3)  #Blurring to remove some noise
    cv.AbsDiff(prev_image, gray_image,
               accumulator)  #Getting the difference image
    cv.InRangeS(accumulator, threshold_limit1_lower, threshold_limit1_upper,
                accumulator)  #Thresholding the difference image
    cv.Dilate(accumulator, accumulator, None,
              2)  #Dilating the thresholded difference image
    cv.Add(accumulator, sum_image, sum_image,
           accumulator)  #Adding the image to a register to use fading
    cv.SubS(sum_image, fading_factor, sum_image)  #Fading
    cv.InRangeS(sum_image, threshold_limit2_lower, threshold_limit2_upper,
                accumulator)  #Thresholding the fading image
    cv.Copy(gray_image, prev_image)
    cv.Copy(accumulator, temp_image)
Example 24
def find_corners(frame, pf):
    # Resize to 640x480
    frame_small = cv.CreateMat(480, 640, cv.CV_8UC3)
    cv.Resize(frame, frame_small)

    frame_size = cv.GetSize(frame_small)
    frame_gray = cv.CreateImage(frame_size, cv.IPL_DEPTH_8U, 1)
    edges = cv.CreateImage(frame_size, cv.IPL_DEPTH_8U, 1)
    cv.CvtColor(frame_small, frame_gray, cv.CV_BGR2GRAY)
    cv.Canny(frame_gray, edges, 400, 400)
    cv.Dilate(edges, edges)

    line_storage = cv.CreateMemStorage()
    lines = cv.HoughLines2(edges, line_storage, cv.CV_HOUGH_PROBABILISTIC, 1,
                           cv.CV_PI / 180.0, 300, 100, 40)
    print len(lines), 'lines found'
    for i in range(len(lines)):
        line = lines[i]
        cv.Line(frame_small, line[0], line[1],
                hv2rgb(360.0 * i / len(lines), 1.0), 3, 8)
        print line

        # Generate an observation: (dist, heading) to line
        if i < 4:
            p1 = util.pixelToDistance(line[0])
            p2 = util.pixelToDistance(line[1])
            dist = util.pointLineDistance((0, 0), (p1, p2))
            pf.observeLine((dist, 0))

    # Find corners
    eig_image = cv.CreateImage(frame_size, cv.IPL_DEPTH_32F, 1)
    temp_image = cv.CreateImage(frame_size, cv.IPL_DEPTH_32F, 1)
    corners = cv.GoodFeaturesToTrack(frame_gray,
                                     eig_image,
                                     temp_image,
                                     10,
                                     0.04,
                                     1.0,
                                     useHarris=True)
    # Take 2 strongest corners
    for pt in corners[:2]:
        print "good feature at", pt[0], pt[1]
        cv.Circle(frame_small, pt, 5, cv.CV_RGB(255, 0, 0), 2, 5, 0)

    cv.ShowImage('frame', frame_small)
    cv.ShowImage('edges', edges)
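
The corner step maps to cv2.goodFeaturesToTrack almost one-to-one; a minimal sketch that keeps the two strongest corners (the function returns them strongest first):

import cv2

def strongest_corners(gray, n=2):
    corners = cv2.goodFeaturesToTrack(gray, maxCorners=10, qualityLevel=0.04,
                                      minDistance=1.0, useHarrisDetector=True)
    if corners is None:
        return []
    return [tuple(pt.ravel()) for pt in corners[:n]]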
Example 25
 def morphology(self, image):
     """ remove noisy pixels by doing erode and dilate """
     cv.MorphologyEx(self.th,
                     self.temp,
                     None,
                     self.morpher_small,
                     cv.CV_MOP_OPEN,
                     iterations=1)
     cv.MorphologyEx(self.temp,
                     self.morphed,
                     None,
                     self.morpher,
                     cv.CV_MOP_CLOSE,
                     iterations=1)
     cv.Dilate(self.morphed, self.temp, self.morpher)
     cv.Copy(self.temp, self.morphed)
     return self.morphed
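
The open/close/dilate sequence above maps directly onto cv2.morphologyEx; a sketch with elliptical kernels standing in for self.morpher_small and self.morpher (their actual shapes are not shown in the snippet):

import cv2

def clean_mask(mask, small_size=3, big_size=5):
    small = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (small_size, small_size))
    big = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (big_size, big_size))
    opened = cv2.morphologyEx(mask, cv2.MORPH_OPEN, small)   # drop speckle noise
    closed = cv2.morphologyEx(opened, cv2.MORPH_CLOSE, big)  # fill small holes
    return cv2.dilate(closed, big)                           # final grow, as above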
Example 26
    def doStuff(self):
        capture = cv.CaptureFromCAM(self.MY_CAMERA)
        if not capture:
            print "I am blinded, check Camera Config"
            exit(1)

        cv.NamedWindow('camera', cv.CV_WINDOW_AUTOSIZE)
        cv.NamedWindow('threshed', cv.CV_WINDOW_AUTOSIZE)
        cv.NamedWindow('cropped', cv.CV_WINDOW_AUTOSIZE)

        while 1:    
            image = cv.QueryFrame(capture)
#            image = cv.LoadImage("2012_automata.jpg")
            if not image:
                break
        
#/////////////////////////////////////////////////////
#            Blurring my image and doing stuff
            image_smoothed = cv.CloneImage(image)
            cv.Smooth(image, image_smoothed, cv.CV_GAUSSIAN, 1)
            image_threshed = self.thresholded_image(image_smoothed)
            cv.Dilate(image_threshed, image_threshed, None, 3)
            cv.Erode(image_threshed, image_threshed, None, 3)
#///////////////////////////////////////////////////////
#            Get the Contours
            current_contour = cv.FindContours(cv.CloneImage(image_threshed),
                                              cv.CreateMemStorage(),
                                              cv.CV_RETR_CCOMP,
                                              cv.CV_CHAIN_APPROX_SIMPLE)
            object_position = (0, 0)
            if len(current_contour) != 0:
                object_position = self.contourCenter(
                    self.largestContour(current_contour))

#            cropped = cv.CreateImage((image_threshed.width,image_threshed.height), image_threshed.depth, image_threshed.nChannels)
#            print object_position
            try:
                src_region = cv.GetSubRect(
                    image_threshed,
                    (0, object_position[1] - (2 / 100), image_threshed.width,
                     image_threshed.height * 3 / 100))
            except:
                src_region = cv.GetSubRect(
                    image_threshed,
                    (0, 0, image_threshed.width,
                     image_threshed.height * 5 / 100))
            image = self.drawPointOnImage(image, object_position)
            image = self.getSlicedCenter(src_region, image)
            cv.ShowImage('threshed', image_threshed)
            cv.ShowImage('camera', image)
            cv.ShowImage('cropped', src_region)
            c = cv.WaitKey(10)
            if c != -1:
#                return src_region
                break
Example 27
def CenterFunction(R, imghsv):
	imgyellowthresh = getthresholdedimgRGeneric(R, imghsv)  # build the colour masks, here specific to robot R4

	cv.Erode(imgyellowthresh, imgyellowthresh, None, 3)  # filter
	cv.Dilate(imgyellowthresh, imgyellowthresh, None, 6)  # filter
	img2 = cv.CloneImage(imgyellowthresh)  # clone the image into img2 (unused)
	storage = cv.CreateMemStorage(0)  # memory storage, required for cv.FindContours
	contour = cv.FindContours(imgyellowthresh, storage, cv.CV_RETR_CCOMP, cv.CV_CHAIN_APPROX_SIMPLE)  # find the robots' contours
	points = []	

#	This is the new part here. ie Use of cv.BoundingRect()
	while contour:
		# Draw bounding rectangles
		bound_rect = cv.BoundingRect(list(contour))  # bounding box from the contour points, for display/debug
		#bound_rect = cv.BoundingRect(contour)

		# for more details about cv.BoundingRect, see the documentation
		pt1 = (bound_rect[0], bound_rect[1]) #
		pt2 = (bound_rect[0] + bound_rect[2], bound_rect[1] + bound_rect[3])
		points.append(pt1)
		points.append(pt2)
		cv.Rectangle(color_image, pt1, pt2, cv.CV_RGB(255,0,0), 1)
		# the bounding rectangle has been drawn at this point
	#	Calculating centroids
	
		centroidx=cv.Round((pt1[0]+pt2[0])/2)
		centroidy=cv.Round((pt1[1]+pt2[1])/2)
		area = cv.ContourArea(list(contour))
		#print "CentroidXY:" + str(centroidx) +":" +str(centroidy) + "A:" + str(area)
		if(area > 100):
			print "CentroidXY:" + str(centroidx) +":" +str(centroidy) + "A:" + str(area)
			coords = pack('iiiii', 4,centroidx, centroidy, 0, int(time.time()))
			mosq.publish("coords", coords, 0)
	
		contour = contour.h_next()	
		print contour
		#	Identifying if blue or yellow blobs and adding centroids to corresponding lists	
		if (169<cv.Get2D(imghsv,centroidy,centroidx)[0]<180):
			red.append((centroidx,centroidy))
		elif (100<cv.Get2D(imghsv,centroidy,centroidx)[0]<120):
			blue.append((centroidx,centroidy))
		elif (67<cv.Get2D(imghsv,centroidy,centroidx)[0]<100):
			green.append((centroidx, centroidy))

	return
Example 28
 def detectMotion(self, curr):
     assert (curr.nChannels == 1)
     if len(self.history_frames) < self.nHistory:
         self.history_frames.append(curr)
         return curr
     else:
         oldest_frame = self.history_frames.pop(0)
         self.history_frames.append(curr)
     size = (curr.width, curr.height)
     motion_frame = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)
     cv.AbsDiff(oldest_frame, curr, motion_frame)
     cv.CmpS(motion_frame, self.threshold, motion_frame, cv.CV_CMP_GT)
     # Eliminate disperse pixels, which occur because of
     # the noise of the camera
     img_temp = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)
     cv.Erode(motion_frame, img_temp)
     cv.Dilate(img_temp, motion_frame)
     return motion_frame
Example 29
def preprocess(image, addr, extras):
    log = cap.logger(extras, image)
    alpha, dark = rotAngle(image)
    log.log(dark, False)
    clear = clearNoise(image)
    log.log(clear, False)
    straight = cap.doRotate(clear,
                            -alpha,
                            fillval=0,
                            resize=False,
                            interpolation=cv.CV_INTER_NN)
    #cv.Threshold(straight, straight, 128, 255, cv.CV_THRESH_BINARY)
    log.log(straight)
    cv.Dilate(straight, straight)
    cv.Erode(straight, straight)
    log.log(straight)
    cap.processExtras(log.steps, addr, extras, cap.CAP_STAGE_PREPROCESS)
    return straight
Example 30
    def __init__(self,img):

        small_img = cv.CreateImage((cv.Round(img.width / image_scale),
                                    cv.Round(img.height / image_scale)), 8, 3)
        cv.Resize(img, small_img, cv.CV_INTER_LINEAR)

        if H != 0 and S != 0:
            getSkinColor(small_img, hasColor)

        imgHSV = cv.CreateImage(cv.GetSize(small_img), 8, 3)
        cv.CvtColor(small_img, imgHSV, cv.CV_BGR2HSV)

        hueImg = cv.CreateMat(small_img.height, small_img.width, cv.CV_8UC1)
        satImg = cv.CreateMat(small_img.height, small_img.width, cv.CV_8UC1)
        valImg = cv.CreateMat(small_img.height, small_img.width, cv.CV_8UC1)
        cv.Split(imgHSV, hueImg, satImg, valImg, None)

        cv.ShowImage("hueImg", hueImg)

        hueTrshld = cv.CreateMat(hueImg.height, hueImg.width, cv.CV_8UC1)
        hueDiff = 30
        satDiff = 80
        for x in range(0, hueTrshld.height):
            for y in range(0, hueTrshld.width):
                hueTrshld[x, y] = 0
                if hueImg[x, y] > (H - hueDiff) and hueImg[x, y] > 1 \
                        and hueImg[x, y] < (H + hueDiff):
                    if satImg[x, y] > (S - satDiff) and satImg[x, y] < (S + satDiff):
                        hueTrshld[x, y] = 255

        hueTrshldErode = cv.CreateMat(hueImg.height, hueImg.width, cv.CV_8UC1)
        hueTrshldDilate = cv.CreateMat(hueImg.height, hueImg.width, cv.CV_8UC1)

        kernel10 = cv.CreateStructuringElementEx(10, 10, 0, 0, cv.CV_SHAPE_RECT)
        kernel8 = cv.CreateStructuringElementEx(8, 8, 0, 0, cv.CV_SHAPE_RECT)
        kernel6 = cv.CreateStructuringElementEx(6, 6, 0, 0, cv.CV_SHAPE_RECT)
        kernel4 = cv.CreateStructuringElementEx(4, 4, 0, 0, cv.CV_SHAPE_RECT)

        cv.Erode(hueTrshld, hueTrshldErode, kernel6, 1)
        cv.Dilate(hueTrshldErode, hueTrshldDilate, kernel10, 1)

        
        cv.ShowImage("hueTrshldOr", hueTrshld) #original
        cv.ShowImage("hueTrshldDi", hueTrshldDilate) #dilated
        cv.ShowImage("hueTrshldEr", hueTrshldErode)  #eroded