Example #1
def edge_threshold(image, roi=None, debug=0):
    thresholded = cv.CloneImage(image)
    horizontal = cv.CreateImage(cv.GetSize(image), cv.IPL_DEPTH_16S, 1)
    magnitude32f = cv.CreateImage(cv.GetSize(image), cv.IPL_DEPTH_32F, 1)
    vertical = cv.CloneImage(horizontal)
    v_edge = cv.CloneImage(image)
    magnitude = cv.CloneImage(horizontal)

    storage = cv.CreateMemStorage(0)
    mag = cv.CloneImage(image)
    cv.Sobel(image, horizontal, 0, 1, 1)
    cv.Sobel(image, vertical, 1, 0, 1)
    cv.Pow(horizontal, horizontal, 2)
    cv.Pow(vertical, vertical, 2)

    cv.Add(vertical, horizontal, magnitude)
    cv.Convert(magnitude, magnitude32f)
    cv.Pow(magnitude32f, magnitude32f, 0.5)
    cv.Convert(magnitude32f, mag)
    if roi:
        cv.And(mag, roi, mag)
    cv.Normalize(mag, mag, 0, 255, cv.CV_MINMAX, None)
    cv.Threshold(mag, mag, 122, 255, cv.CV_THRESH_BINARY)
    draw_image = cv.CloneImage(image)
    and_image = cv.CloneImage(image)
    results = []

    threshold_start = 17
    for window_size in range(threshold_start, threshold_start + 1, 1):
        r = 20
        for threshold in range(0, r):
            cv.AdaptiveThreshold(image, thresholded, 255, \
                cv.CV_ADAPTIVE_THRESH_MEAN_C, cv.CV_THRESH_BINARY_INV, window_size, threshold)
            contour_image = cv.CloneImage(thresholded)
            contours = cv.FindContours(contour_image, storage, cv.CV_RETR_LIST)
            cv.Zero(draw_image)
            cv.DrawContours(draw_image, contours, (255, 255, 255),
                            (255, 255, 255), 1, 1)
            if roi:
                cv.And(draw_image, roi, draw_image)
            cv.And(draw_image, mag, and_image)
            m1 = np.asarray(cv.GetMat(draw_image))
            m2 = np.asarray(cv.GetMat(mag))
            total = mag.width * mag.height  #cv.Sum(draw_image)[0]

            coverage = cv.Sum(and_image)[0] / (mag.width * mag.height)
            if debug:
                print threshold, coverage
                cv.ShowImage("main", draw_image)
                cv.ShowImage("main2", thresholded)
                cv.WaitKey(0)
            results.append((coverage, threshold, window_size))

    results.sort(lambda x, y: cmp(y, x))
    _, threshold, window_size = results[0]
    cv.AdaptiveThreshold(image, thresholded, 255, cv.CV_ADAPTIVE_THRESH_MEAN_C, \
        cv.CV_THRESH_BINARY, window_size, threshold)

    return thresholded
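
The Sobel-magnitude step above (per-axis derivative, square, add, square root, rescale) is spread across several temporary images. Below is a minimal sketch of the same computation with the modern cv2/NumPy API; it is an illustration under that assumption, not part of the original example.

import cv2
import numpy as np

def sobel_magnitude(gray):
    # per-axis derivatives in float, mirroring the 16S/32F temporaries above
    gx = cv2.Sobel(gray, cv2.CV_32F, 1, 0)
    gy = cv2.Sobel(gray, cv2.CV_32F, 0, 1)
    # sqrt(gx**2 + gy**2), rescaled to 0..255 so it can be thresholded like `mag`
    mag = cv2.magnitude(gx, gy)
    return cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX).astype(np.uint8)
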
def createMask(image, thresh):
    b, g, r = doSplit(image)
    cv.Threshold(b, b, thresh, 255, cv.CV_THRESH_BINARY)
    cv.Threshold(g, g, thresh, 255, cv.CV_THRESH_BINARY)
    cv.Threshold(r, r, thresh, 255, cv.CV_THRESH_BINARY)
    cv.And(b, g, b, None)
    cv.And(b, r, b, None)
    return b
def rg_filter(r, g, rg_diff=11, b=None):
    #Checking rule: R > G
    rg_sub_binary = first_bigger_then_second(r, g)

    #Checking rule: R > B
    rb_sub_binary = first_bigger_then_second(r, b) if b else None

    #Checking rule: |R - G| >= 11
    rg_diff_thres = abs_diff_threshold(r, g, rg_diff)

    res = image_empty_clone(r)
    cv.And(rg_diff_thres, rg_sub_binary, res)

    if rb_sub_binary:
        cv.And(res, rb_sub_binary, res)
    return res
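
createMask and rg_filter depend on helpers (doSplit, first_bigger_then_second, abs_diff_threshold, image_empty_clone) that are not shown here. A self-contained NumPy/cv2 sketch of the same R > G, R > B, |R - G| >= rg_diff rule, assuming 8-bit single-channel planes:

import cv2
import numpy as np

def rg_filter_np(r, g, rg_diff=11, b=None):
    # work in int16 so the R - G difference cannot wrap around
    r16, g16 = r.astype(np.int16), g.astype(np.int16)
    mask = ((r16 > g16) & (np.abs(r16 - g16) >= rg_diff)).astype(np.uint8) * 255
    if b is not None:
        rb_mask = (r16 > b.astype(np.int16)).astype(np.uint8) * 255
        mask = cv2.bitwise_and(mask, rb_mask)
    return mask
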
Example #4
def hsv_orange_red_threshold(input_image):
    blur_image = cv.CreateMat(input_image.rows, input_image.cols, cv.CV_8UC3)
    cv.Smooth(input_image, blur_image, cv.CV_BLUR, 10, 10)
    proc_image = cv.CreateMat(input_image.rows, input_image.cols, cv.CV_8UC3)
    cv.CvtColor(blur_image, proc_image, cv.CV_BGR2HSV)
    split_image = [
        cv.CreateMat(input_image.rows, input_image.cols, cv.CV_8UC1),
        cv.CreateMat(input_image.rows, input_image.cols, cv.CV_8UC1),
        cv.CreateMat(input_image.rows, input_image.cols, cv.CV_8UC1)
    ]
    cv.Split(proc_image, split_image[0], split_image[1], split_image[2], None)

    thresh_0 = cv.CreateMat(input_image.rows, input_image.cols, cv.CV_8UC1)
    thresh_1 = cv.CreateMat(input_image.rows, input_image.cols, cv.CV_8UC1)
    thresh_2 = cv.CreateMat(input_image.rows, input_image.cols, cv.CV_8UC1)
    red_orange = cv.CreateMat(input_image.rows, input_image.cols, cv.CV_8UC1)
    cv.Threshold(split_image[1], thresh_0, 128, 255,
                 cv.CV_THRESH_BINARY)  # > 50% saturation
    cv.Threshold(split_image[0], thresh_1, 220, 255,
                 cv.CV_THRESH_BINARY)  # > Purple
    cv.Threshold(split_image[0], thresh_2, 10, 255,
                 cv.CV_THRESH_BINARY_INV)  # < Yellow-Orange
    cv.Add(thresh_1, thresh_2, red_orange)
    cv.And(red_orange, thresh_0, red_orange)

    return red_orange
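
A hedged cv2 sketch of the same wrap-around red/orange gate follows. Note that cv2 stores 8-bit hue in 0-179, so the 220/10 cut-offs are copied here only to mirror the original and would need rescaling in practice.

import cv2

def hsv_orange_red_threshold_np(bgr):
    blur = cv2.blur(bgr, (10, 10))
    hsv = cv2.cvtColor(blur, cv2.COLOR_BGR2HSV)
    h, s, _ = cv2.split(hsv)
    sat = cv2.threshold(s, 128, 255, cv2.THRESH_BINARY)[1]           # > 50% saturation
    high_hue = cv2.threshold(h, 220, 255, cv2.THRESH_BINARY)[1]      # > purple
    low_hue = cv2.threshold(h, 10, 255, cv2.THRESH_BINARY_INV)[1]    # < yellow-orange
    red_orange = cv2.add(high_hue, low_hue)
    return cv2.bitwise_and(red_orange, sat)
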
Example #5
        def get_dirmarker(img, angle, Dist, radius):
            X, Y = entCenter(robot)
            Len, _ = entSize(robot)
            point = (X + (Dist + Len / 2.0) * cos(angle),
                     Y - (Dist + Len / 2.0) * sin(angle))
            point = intPoint(point)

            #For visualisation:
            # cv.Circle( frame, point, radius, (0,200,200), 1 )

            point2 = point[0] - nhood[0], point[1] - nhood[1]
            out = cv.CloneImage(img)
            cv.Zero(out)
            cv.Circle(out, point2, radius, (255, 255, 255), -1)

            cv.And(out, img2, out)
            center1 = self.centralMoment(out)
            count1 = cv.CountNonZero(out)

            cv.Erode(out, out)
            center2 = self.centralMoment(out)
            count2 = cv.CountNonZero(out)

            if count2 == 0 and count1 > 10:
                return center1
            else:
                return center2
Example #6
def findFirstColorPattern(img, pattern):
    """
        try to test if one pixel is in our pattern
    """
    channels = [None, None, None]
    channels[0] = cv.CreateImage(cv.GetSize(img), 8, 1)  #blue
    channels[1] = cv.CreateImage(cv.GetSize(img), 8, 1)  #green
    channels[2] = cv.CreateImage(cv.GetSize(img), 8, 1)  #red
    ch0 = cv.CreateImage(cv.GetSize(img), 8, 1)  #blue
    ch1 = cv.CreateImage(cv.GetSize(img), 8, 1)  #green
    ch2 = cv.CreateImage(cv.GetSize(img), 8, 1)  #red
    cv.Split(img, ch0, ch1, ch2, None)
    dest0 = cv.CreateImage(cv.GetSize(img), 8, 1)
    dest1 = cv.CreateImage(cv.GetSize(img), 8, 1)
    dest2 = cv.CreateImage(cv.GetSize(img), 8, 1)
    dest3 = cv.CreateImage(cv.GetSize(img), 8, 1)
    cv.Smooth(ch0, channels[0], cv.CV_GAUSSIAN, 3, 3, 0)
    cv.Smooth(ch1, channels[1], cv.CV_GAUSSIAN, 3, 3, 0)
    cv.Smooth(ch2, channels[2], cv.CV_GAUSSIAN, 3, 3, 0)
    result = []
    for i in range(3):
        lower = pattern[i][2] - 25
        upper = pattern[i][2] + 25
        cv.InRangeS(channels[0], lower, upper, dest0)
        lower = pattern[i][1] - 25
        upper = pattern[i][1] + 25
        cv.InRangeS(channels[1], lower, upper, dest1)
        lower = pattern[i][0] - 25
        upper = pattern[i][0] + 25
        cv.InRangeS(channels[2], lower, upper, dest2)
        cv.And(dest0, dest1, dest3)
        temp = cv.CreateImage(cv.GetSize(img), 8, 1)
        cv.And(dest2, dest3, temp)
        result.append(temp)

    cv.ShowImage("result0", result[0])
    cv.WaitKey(0)
    cv.ShowImage("result1", result[1])
    cv.WaitKey(0)
    cv.ShowImage("result2", result[2])
    cv.WaitKey(0)
    cv.Or(result[0], result[1], dest0)
    cv.Or(dest0, result[2], dest3)
    cv.NamedWindow("result", cv.CV_WINDOW_AUTOSIZE)
    cv.ShowImage("result", dest3)
    cv.WaitKey(0)
    return dest3
Example #7
    def checkRange(self, src, lowBound, highBound):
        size = im.size(src)
        mask = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)
        gt_low = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)
        cv.CmpS(src, lowBound, gt_low, cv.CV_CMP_GT)
        lt_high = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)
        cv.CmpS(src, highBound, lt_high, cv.CV_CMP_LT)
        cv.And(gt_low, lt_high, mask)
        return mask
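
checkRange builds an exclusive (lowBound, highBound) band by comparing against each bound and ANDing the two results. A sketch of the same idea with plain NumPy comparisons (an assumption, not the project's im/cv helpers):

import cv2
import numpy as np

def check_range_np(src, low_bound, high_bound):
    gt_low = (src > low_bound).astype(np.uint8) * 255
    lt_high = (src < high_bound).astype(np.uint8) * 255
    return cv2.bitwise_and(gt_low, lt_high)
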
def norm_rg_filter(r, g, b):
    nr, ng, _ = get_normalized_rgb_planes(r, g, b)

    nr_mask = get_filtered_plane(nr, ((0.33, 0.6), ), probability_to_255)
    ng_mask = get_filtered_plane(ng, ((0.25, 0.37), ), probability_to_255)

    res = image_empty_clone(nr)
    cv.And(nr_mask, ng_mask, res)
    return res
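
get_normalized_rgb_planes and get_filtered_plane are not shown. Assuming they compute the normalized chromaticities r/(r+g+b) and g/(r+g+b) and band-filter them, a self-contained sketch could look like this:

import cv2
import numpy as np

def norm_rg_mask(bgr, nr_range=(0.33, 0.6), ng_range=(0.25, 0.37)):
    b, g, r = cv2.split(bgr.astype(np.float32))
    total = b + g + r + 1e-6                      # avoid division by zero
    nr, ng = r / total, g / total
    nr_mask = ((nr_range[0] <= nr) & (nr <= nr_range[1])).astype(np.uint8) * 255
    ng_mask = ((ng_range[0] <= ng) & (ng <= ng_range[1])).astype(np.uint8) * 255
    return cv2.bitwise_and(nr_mask, ng_mask)
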
Example #9
    def find_blobs(self, frame, debug_image):
        '''Find blobs in an image.

        Hopefully this gets blobs that correspond with
        buoys, but any intelligent checking is done outside of this function.

        '''

        # Get Channels
        hsv = cv.CreateImage(cv.GetSize(frame), 8, 3)
        cv.CvtColor(frame, hsv, cv.CV_BGR2HSV)
        saturation = libvision.misc.get_channel(hsv, 1)
        red = libvision.misc.get_channel(frame, 2)

        # Adaptive Threshold
        cv.AdaptiveThreshold(
            saturation,
            saturation,
            255,
            cv.CV_ADAPTIVE_THRESH_MEAN_C,
            cv.CV_THRESH_BINARY_INV,
            self.saturation_adaptive_thresh_blocksize -
            self.saturation_adaptive_thresh_blocksize % 2 + 1,
            self.saturation_adaptive_thresh,
        )
        cv.AdaptiveThreshold(
            red,
            red,
            255,
            cv.CV_ADAPTIVE_THRESH_MEAN_C,
            cv.CV_THRESH_BINARY,
            self.red_adaptive_thresh_blocksize -
            self.red_adaptive_thresh_blocksize % 2 + 1,
            -1 * self.red_adaptive_thresh,
        )

        kernel = cv.CreateStructuringElementEx(9, 9, 4, 4, cv.CV_SHAPE_ELLIPSE)
        cv.Erode(saturation, saturation, kernel, 1)
        cv.Dilate(saturation, saturation, kernel, 1)
        cv.Erode(red, red, kernel, 1)
        cv.Dilate(red, red, kernel, 1)

        buoys_filter = cv.CreateImage(cv.GetSize(frame), 8, 1)
        cv.And(saturation, red, buoys_filter)

        if debug_image:
            svr.debug("Saturation", saturation)
            svr.debug("Red", red)
            svr.debug("AdaptiveThreshold", buoys_filter)

        # Get blobs
        labeled_image = cv.CreateImage(cv.GetSize(buoys_filter), 8, 1)
        blobs = libvision.blob.find_blobs(buoys_filter, labeled_image,
                                          MIN_BLOB_SIZE, 10)

        return blobs, labeled_image
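
The `blocksize - blocksize % 2 + 1` expressions above force the adaptive-threshold window to be odd, which AdaptiveThreshold requires. A small illustration with the cv2 API (the random input is only there to make the snippet runnable):

import cv2
import numpy as np

def odd_blocksize(n):
    # adaptiveThreshold needs an odd block size of at least 3
    return max(3, n - n % 2 + 1)

gray = np.random.randint(0, 256, (64, 64), dtype=np.uint8)
mask = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                             cv2.THRESH_BINARY_INV, odd_blocksize(10), 5)
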
Example #10
def sub_intersection(amap, apoly, maxwidth, maxheight):
    polymap = cv.CreateImage((maxwidth,
                         maxheight),
                         cv.IPL_DEPTH_8U,1)
    cv.FillPoly(polymap, [apoly], im.color.blue)
    intersection = cv.CreateImage((maxwidth,
                         maxheight),
                         cv.IPL_DEPTH_8U,1)
    cv.And(polymap, amap, intersection)
    cv.Sub(amap, intersection, amap)
def filter_by_hsv(img, ranges):
    h, s, v = get_hsv_planes(img)

    h_mask = get_filtered_plane(h, ranges["h"], h2cv_values)
    s_mask = get_filtered_plane(s, ranges["s"], probability_to_255)
    v_mask = get_filtered_plane(v, ranges["v"], probability_to_255)

    hsv_mask = image_empty_clone(v_mask)
    cv.And(v_mask, s_mask, hsv_mask, mask=h_mask)
    return hsv_mask
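
The mask= argument to cv.And restricts the AND to pixels where h_mask is set. A hedged cv2 equivalent, assuming the helpers above return equally sized 8-bit masks:

import cv2
import numpy as np

def combine_hsv_masks(h_mask, s_mask, v_mask):
    out = np.zeros_like(v_mask)
    # pixels outside h_mask stay 0, roughly matching cv.And(v_mask, s_mask, hsv_mask, mask=h_mask)
    return cv2.bitwise_and(v_mask, s_mask, dst=out, mask=h_mask)
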
Example #12
def has_intersection(amap, apoly, maxwidth, maxheight):
    polymap = cv.CreateImage((maxwidth,
                         maxheight),
                         cv.IPL_DEPTH_8U,1)
    cv.FillPoly(polymap, [apoly], im.color.blue)
    intersection = cv.CreateImage((maxwidth,
                         maxheight),
                         cv.IPL_DEPTH_8U,1)
    cv.And(polymap, amap, intersection)
    m=cv.Moments(cv.GetMat(intersection), True)
    return bool(cv.GetSpatialMoment(m, 0, 0))
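
has_intersection rasterizes the polygon, ANDs it with the map, and tests the zeroth spatial moment, which is just the sum of the overlap. An equivalent cv2/NumPy sketch (a hypothetical helper, not from the original project):

import cv2
import numpy as np

def has_intersection_np(amap, apoly, maxwidth, maxheight):
    polymap = np.zeros((maxheight, maxwidth), dtype=np.uint8)
    cv2.fillPoly(polymap, [np.asarray(apoly, dtype=np.int32)], 255)
    overlap = cv2.bitwise_and(polymap, amap)
    # a non-zero count is equivalent to a non-zero m00 moment
    return cv2.countNonZero(overlap) > 0
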
Example #13
    def _detectSkin(self, bgrimg):
        hsvimg = im.bgr2hsv(bgrimg)
        h, s, v = im.split3(hsvimg)
        skin_mask = cv.CreateImage(im.size(hsvimg), cv.IPL_DEPTH_8U, 1)
        h_mask = cv.CreateImage(im.size(hsvimg), cv.IPL_DEPTH_8U, 1)
        v_mask = cv.CreateImage(im.size(hsvimg), cv.IPL_DEPTH_8U, 1)

        v_mask = self.checkRange(v, self.v_low, self.v_high)
        h_mask = self.checkRange(h, self.h_low, self.h_high)
        cv.And(h_mask, v_mask, skin_mask)

        return skin_mask
Example #14
def process_image(filename):
    print(filename)
    orig = cv.LoadImage(filename)
    output = cv.LoadImage(filename)
    print("Not filename error")
    # create tmp images
    rrr = cv.CreateImage((orig.width, orig.height), cv.IPL_DEPTH_8U, 1)
    ggg = cv.CreateImage((orig.width, orig.height), cv.IPL_DEPTH_8U, 1)
    bbb = cv.CreateImage((orig.width, orig.height), cv.IPL_DEPTH_8U, 1)
    processed = cv.CreateImage((orig.width, orig.height), cv.IPL_DEPTH_8U, 1)
    storage = cv.CreateMat(orig.width, 1, cv.CV_32FC3)
    print("Checkpoint 1", storage)
    #split image into RGB components
    cv.Split(orig, rrr, ggg, bbb, None)
    #process each component
    channel_processing(rrr)
    channel_processing(ggg)
    channel_processing(bbb)
    #combine images using logical 'And' to avoid saturation
    cv.And(rrr, ggg, rrr)
    cv.And(rrr, bbb, processed)
    #cv.ShowImage('before canny', processed)
    #cv.SaveImage('case3_processed.jpg',processed)
    #use canny, as HoughCircles seems to prefer ring like circles to filled ones.
    cv.Canny(processed, processed, 5, 70, 3)
    #smooth to reduce noise a bit more
    cv.Smooth(processed, processed, cv.CV_GAUSSIAN, 7, 7)
    print("Checkpoint 2")
    #cv.ShowImage('processed', processed)
    #find circles, with parameter search
    storage = find_circles(processed, storage, 100)
    print("Checkpoint 3")
    cir = get_circles(storage)

    if (len(cir) == 1):
        draw_circles(storage, output)
        cv.ShowImage("original with circles", output)
        cv.SaveImage('output_' + filename, output)
        cv.WaitKey(0)
        return cir[0]
def skin_mask(img):
    r, g, b = get_rgb_planes(img)
    hsv_mask = filter_by_hsv(img, {
        "h": ((0, 50), (340, 360)),
        "s": ((0.12, 0.7), ),
        "v": ((0.3, 1), )
    })

    rg_mask = rg_filter(r, g)
    nr_ng_mask = norm_rg_filter(r, g, b)

    tmp = image_empty_clone(hsv_mask)
    total_mask = image_empty_clone(hsv_mask)
    cv.And(hsv_mask, rg_mask, tmp)
    cv.And(tmp, nr_ng_mask, total_mask)

    #TODO What is this?
    th = image_empty_clone(total_mask)
    cv.Smooth(total_mask, total_mask, cv.CV_MEDIAN, 5, 5)
    cv.Threshold(total_mask, th, 25, 255, cv.CV_THRESH_BINARY)

    return total_mask
Example #16
def rects_intersection(rects, maxwidth, maxheight):
    if not rects:
        return
    intersection = cv.CreateImage((maxwidth,
                         maxheight),
                         cv.IPL_DEPTH_8U,1)
    cv.FillPoly(intersection, [rects[0]], im.color.blue)
    for r in rects:
        canvas = cv.CreateImage((maxwidth,
                         maxheight),
                         cv.IPL_DEPTH_8U,1)
        cv.FillPoly(canvas, [r], im.color.blue)
        cv.And(canvas, intersection, intersection)
    return im.find_contour(intersection)
Example #17
    def remove_background_values(self, frame):
        self.Imask = self.remove_background(frame)

        logging.debug(
            "Using thresholded background subtracted image as a mask")
        #cv.ShowImage("ASD", self.Imask)
        self.Igray = self.threshold.foreground(self.Imask)
        cv.CvtColor(self.Imask, self.Igray, cv.CV_BGR2GRAY)
        cv.Threshold(self.Igray, self.Igray, 200, 255, cv.CV_THRESH_OTSU)
        #cv.EqualizeHist(self.Igray, self.Igray)
        cv.CvtColor(self.Igray, self.Imask, cv.CV_GRAY2BGR)

        #Finally, return the salient bits of the original frame
        cv.And(self.Imask, frame, self.Iobjects)

        return self.Iobjects, self.Igray
Example #18
    def run(self):
  
        while not self._stop.isSet():
            task = self.q.get()
            
            if task != None:
                obj, image = task
        
                rect = cv.BoundingRect(obj.cont)
                siftimage = siftfastpy.Image(rect[2], rect[3])
                cv.CvtColor(image, self.gray, cv.CV_BGR2GRAY)
                cv.SetZero(self.mask)
                cv.FillPoly(self.mask, [obj.cont], cv.Scalar(255))
                cv.And(self.gray, self.mask, self.gray)
                gnp = np.asarray(cv.GetSubRect(self.gray, rect))
                siftimage.SetData(gnp)
                t0 = time.time()

                # compute keypoints and time how long it takes
                frames,desc = siftfastpy.GetKeypoints(siftimage)
                self.stats.append((rect[2]*rect[3], time.time() - t0))

                # compute feature vector
                tmp = np.concatenate((frames[:,2:4], desc), axis=1).astype('float64')
                
                # search in the flann tree for the feature vectors
                n = 2
                thr = 1.5
                result, dists = self.flann.nn_index(tmp, n, checks=32)
                
                # ismatch contains the indices in the testset for which a match is found
                ismatch = dists[:,1] > thr * dists[:,0]
                               
                # meta contains the index to object-ID mapping
                obj.ids = []
                for i, res in enumerate(result):
                    if ismatch[i]:
                        obj.ids.append(self.meta[res[0]][0])
#                obj.ids = [self.meta[res][0] for i, res in enumerate(result) if ismatch[i]]

                
                # transfer keypoints back to full frame coordinates
                frames[:,0] += rect[0]
                frames[:,1] += rect[1] 
                obj.frames = frames
                obj.desc = desc
Example #19
    def _detectSkin(self, bgrimg):
        #hsvimg = im.bgr2hsv(bgrimg)
        hsvimg = cv.CreateImage((bgrimg.width, bgrimg.height), 8, 3)
        cv.CvtColor(bgrimg, hsvimg, cv.CV_RGB2HSV)
        #h,s,v = im.split3(bgrimg)
        skin_mask = cv.CreateImage((bgrimg.width, bgrimg.height), 8, 1)
        low = (self.h_low, self.v_low, 0)
        high = (self.h_high, self.v_high, 256)
        cv.InRangeS(hsvimg, low, high, skin_mask)
        #cv.ShowImage("inrange", skin_mask)

        face_mask = face.blockfacemask(bgrimg)
        cv.And(skin_mask, face_mask, skin_mask)
        #h_mask = cv.CreateImage(im.size(hsvimg), cv.IPL_DEPTH_8U, 1)
        #s_mask = cv.CreateImage(im.size(hsvimg), cv.IPL_DEPTH_8U, 1)
        #print self.v_low, self.v_high, self.h_low, self.h_high
        #s_mask = self.checkRange(s, self.v_low, self.v_high)
        #h_mask = self.checkRange(h, self.h_low, self.h_high)
        #cv.And(h_mask, s_mask, skin_mask)

        return skin_mask
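
The InRangeS call above bounds hue with (h_low, h_high) and applies the v bounds to the saturation slot of the HSV triple. A cv2 sketch that mirrors that usage (COLOR_BGR2HSV is an assumption about the frame layout, since the original converts with CV_RGB2HSV, and blockfacemask is omitted because it is not shown here):

import cv2

def detect_skin_inrange(bgr, h_low, h_high, v_low, v_high):
    hsv = cv2.cvtColor(bgr, cv2.COLOR_BGR2HSV)
    # same (H, S, V) ordering as the original call, with the v bounds applied to S
    return cv2.inRange(hsv, (h_low, v_low, 0), (h_high, v_high, 255))
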
Example #20
def main(args):
    image = cv.LoadImage(args.image)
    prefix, extension = os.path.splitext(args.image)
    output_path = "%s.mask.png" % (prefix)
    if os.path.exists(output_path):
        mask = cv.LoadImage(output_path, cv.CV_LOAD_IMAGE_GRAYSCALE)
    else:
        mask = cv.CreateImage((image.width, image.height), 8, 1)
        cv.Set(mask, 255)
    if args.threshold:
        print "Thresholding"
        image_hue = cv.CreateImage(cv.GetSize(image), 8, 1)
        image_sat = cv.CreateImage(cv.GetSize(image), 8, 1)
        image_val = cv.CreateImage(cv.GetSize(image), 8, 1)

        image_hsv = cv.CreateImage(cv.GetSize(image), 8, 3)

        mask_hue = cv.CreateImage((image.width, image.height), 8, 1)
        mask_val = cv.CreateImage((image.width, image.height), 8, 1)
        mask_new = cv.CreateImage((image.width, image.height), 8, 1)
        cv.CvtColor(image, image_hsv, cv.CV_BGR2HSV)
        cv.Split(image_hsv, image_hue, image_sat, image_val, None)
        upper_thresh_hue = cv.CreateImage(cv.GetSize(image), 8, 1)
        lower_thresh_hue = cv.CreateImage(cv.GetSize(image), 8, 1)
        upper_thresh_val = cv.CreateImage(cv.GetSize(image), 8, 1)
        lower_thresh_val = cv.CreateImage(cv.GetSize(image), 8, 1)
        cv.Threshold(image_hue, upper_thresh_hue, args.hue_max, 255,
                     cv.CV_THRESH_BINARY)
        cv.Threshold(image_hue, lower_thresh_hue, args.hue_min, 255,
                     cv.CV_THRESH_BINARY_INV)
        cv.Threshold(image_val, upper_thresh_val, 235, 255,
                     cv.CV_THRESH_BINARY)
        cv.Threshold(image_val, lower_thresh_val, 30, 255,
                     cv.CV_THRESH_BINARY_INV)
        cv.Or(upper_thresh_hue, lower_thresh_hue, mask_hue)
        cv.Or(upper_thresh_val, lower_thresh_val, mask_val)
        cv.Or(mask_hue, mask_val, mask_new)
        cv.And(mask, mask_new, mask)

    MaskWindow(image, mask, output_path, args.brush_size, args.zoom_out)
Example #21
    def get_near_region_mask(self, blds):
        regions = [bd.near_region_poly for bd in blds]
        c = cg.rects_intersection(regions, self.label_img.width,
                                  self.label_img.height)
        # subtract buildings
        equ_class_region = cv.CreateImage(
            (self.label_img.width, self.label_img.height), cv.IPL_DEPTH_8U, 1)
        canvas = cv.CloneImage(self.label_img)
        cv.FillPoly(equ_class_region, [c], im.color.blue)
        cv.CmpS(canvas, 0, canvas, cv.CV_CMP_EQ)
        cv.And(equ_class_region, canvas, equ_class_region)

        # subtract near of near's neighbor
        near_of_near = set(self.buildings)
        for bd in blds:
            near_of_near = near_of_near.union(bd.near_set)
        near_of_near.difference_update(set(blds))
        near_regions = [bd.near_region_poly for bd in near_of_near]
        for reg, bd in zip(near_regions, near_of_near):
            if cg.has_intersection(equ_class_region, reg, self.label_img.width,
                                   self.label_img.height):
                cg.sub_intersection(equ_class_region, reg,
                                    self.label_img.width,
                                    self.label_img.height)
        # equ_class_region -= near of near via nearest
        for bd in near_of_near:
            eq_region = self.get_equivalent_region_via_nearest(bd)
            c = im.find_contour(eq_region)
            while c:
                if cg.has_intersection(equ_class_region, list(c),
                                       self.label_img.width,
                                       self.label_img.height):
                    cg.sub_intersection(equ_class_region, list(c),
                                        self.label_img.width,
                                        self.label_img.height)
                c = c.h_next()

        return equ_class_region
Example #22
                    frameDiffFundoSaida = cv.CloneImage(frameDiff)
                    cv.Threshold(frameDiff, frameDiff, p_diffThreshold, 255,
                                 cv.CV_THRESH_BINARY)

                    #for i in range(2): cv.Erode(frameDiff,frameDiff)
                    # zero out the difference wherever there is shadow
                    frameSemSombra = cv.CloneImage(imgSombra)
                    # dilate the shadow to catch any remaining shadow fringe
                    #for i in range(2): cv.Erode(frameSemSombra,frameSemSombra)
                    for i in range(2):
                        cv.Dilate(frameSemSombra, frameSemSombra)

                    # invert and AND
                    cv.Not(frameSemSombra, frameSemSombra)

                    cv.And(frameSemSombra, frameDiff, frameSemSombra)

                    frameDiff = cv.CloneImage(frameSemSombra)

                    frameDiffSemSombra = cv.CloneImage(frameDiff)
                    '''
                    Make a copy of the image.

                    Apply cvSmooth with the CV_MEDIAN method to remove noise from the copy.

                    Apply an open (erode + dilate) to the copy.

                        The erode should remove the remaining noise. You can use a horizontal kernel and then a vertical one.
                        Make the dilates one step larger than the erode (the white left in the final image is larger than in the original).

                    AND the original image with the opened image.
                    '''
def pegarIris(orig, nameFoto="teste.bmp"):
    orig2 = cv.CloneImage(orig)

    # create tmp images
    grey_scale = cv.CreateImage(cv.GetSize(orig), 8, 1)
    processedPupila = cv.CreateImage(cv.GetSize(orig), 8, 1)
    processedIris = cv.CreateImage(cv.GetSize(orig), 8, 1)

    cv.Smooth(orig, orig, cv.CV_GAUSSIAN, 3, 3)

    cv.CvtColor(orig, grey_scale, cv.CV_RGB2GRAY)
    cv.CvtColor(orig, processedIris, cv.CV_RGB2GRAY)

    cv.Smooth(grey_scale, processedPupila, cv.CV_GAUSSIAN, 15, 15)
    cv.Canny(processedPupila, processedPupila, 5, 70, 3)
    cv.Smooth(processedPupila, processedPupila, cv.CV_GAUSSIAN, 15, 15)
    #cv.ShowImage("pupila_processada", processedPupila)

    cv.Smooth(grey_scale, processedIris, cv.CV_GAUSSIAN, 15, 15)
    cv.Canny(processedIris, processedIris, 5, 70, 3)
    cv.Smooth(processedIris, processedIris, cv.CV_GAUSSIAN, 15, 15)
    cv.Smooth(processedIris, processedIris, cv.CV_GAUSSIAN, 15, 15)
    #cv.ShowImage("pupila_processada2", processedIris)

    #cv.Erode(processedIris, processedIris, None, 10)
    #cv.Dilate(processedIris, processedIris, None, 10)
    #cv.Canny(processedIris, processedIris, 5, 70, 3)
    #cv.Smooth(processedIris, processedIris, cv.CV_GAUSSIAN, 15, 15)
    #cv.Smooth(processedIris, processedIris, cv.CV_GAUSSIAN, 15, 15)

    #cv.Smooth(processedPupila, processedIris, cv.CV_GAUSSIAN, 15, 15)
    #cv.ShowImage("Iris_processada", processedIris)
    #cv.Dilate(processedIris, processedIris, None, 10)

    storagePupila = cv.CreateMat(orig.width, 1, cv.CV_32FC3)
    storageIris = cv.CreateMat(orig.width, 1, cv.CV_32FC3)

    # these parameters need to be adjusted for every single image
    HIGH = 30
    LOW = 20

    HIGH2 = 120
    LOW2 = 60

    imgBranca = cv.CreateImage(cv.GetSize(orig), 8, 3)
    imgPreta = cv.CreateImage(cv.GetSize(orig), 8, 3)
    cv.Zero(imgPreta)
    cv.Not(imgPreta, imgBranca)

    imagemMaskPupila = cv.CreateImage(cv.GetSize(orig), 8, 3)
    imagemMaskPupila = cv.CloneImage(imgBranca)

    imagemMaskIris = cv.CreateImage(cv.GetSize(orig), 8, 3)
    imagemMaskIris = cv.CloneImage(imgPreta)

    #try:
    # extract circles
    #cv2.cv.HoughCircles(processedIris, storageIris, cv.CV_HOUGH_GRADIENT, 3, 100.0,LOW,HIGH, LOW2, HIGH2)
    cv2.cv.HoughCircles(processedPupila, storagePupila, cv.CV_HOUGH_GRADIENT,
                        2, 100.0, LOW, HIGH)
    cv2.cv.HoughCircles(processedIris, storageIris, cv.CV_HOUGH_GRADIENT, 3,
                        100.0, LOW, HIGH, LOW2, HIGH2)

    # Pupil circles
    #for i in range(0, len(np.asarray(storagePupila))):
    RadiusPupila = int(np.asarray(storagePupila)[0][0][2])
    xPupila = int(np.asarray(storagePupila)[0][0][0])
    yPupila = int(np.asarray(storagePupila)[0][0][1])
    centerPupila = (xPupila, yPupila)
    #print "RadiusPupila %d" %RadiusPupila

    cv.Circle(imagemMaskPupila, centerPupila, RadiusPupila, cv.CV_RGB(0, 0, 0),
              -1, 8, 0)
    cv.Circle(orig, centerPupila, 1, cv.CV_RGB(0, 255, 0), -1, 8, 0)
    cv.Circle(orig, centerPupila, RadiusPupila, cv.CV_RGB(255, 0, 0), 3, 8, 0)
    #cv.ShowImage("pupila"+str(0), orig)
    orig = cv.CloneImage(orig2)

    #cv.WaitKey(0)

    # Iris circles
    #for i in range(0, len(np.asarray(storageIris))):
    RadiusIris = int(np.asarray(storageIris)[0][0][2])
    xIris = int(np.asarray(storageIris)[0][0][0])
    yIris = int(np.asarray(storageIris)[0][0][1])
    centerIris = (xIris, yIris)
    #print "RadiusIris %d" %RadiusIris

    cv.Circle(imagemMaskIris, centerIris, RadiusIris, cv.CV_RGB(255, 255, 255),
              -1, 8, 0)

    cv.Circle(orig, centerIris, 1, cv.CV_RGB(0, 255, 0), -1, 8, 0)
    cv.Circle(orig, centerIris, RadiusIris, cv.CV_RGB(255, 0, 0), 3, 8, 0)
    #cv.ShowImage("Iris"+str(0), orig)
    orig = cv.CloneImage(orig2)

    #cv.WaitKey(0)

    #except:
    #    print "nothing found"
    #    pass
    # create the final image
    finalAux = cv.CreateImage(cv.GetSize(orig), 8, 3)
    final = cv.CreateImage(cv.GetSize(orig), 8, 3)

    # grab the whole iris
    cv.And(orig, imagemMaskPupila, finalAux)
    cv.And(finalAux, imagemMaskIris, final)

    cv.SaveImage(nameFoto, final)
    #cv.ShowImage("original with circles", final)

    #cv.WaitKey(0)

    return final
Example #24
def DetectaSombra(frame, bg):

    dbg = 1

    if dbg:
        t1 = time.time()

    print 'Detecting shadows in the image...'

    # create the per-channel images (gray, HSV and RGB planes)
    imgCinza = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)
    imgHSV = cv.CloneImage(frame)

    imgH = cv.CloneImage(imgCinza)
    imgS = cv.CloneImage(imgCinza)
    imgV = cv.CloneImage(imgCinza)

    imgR = cv.CloneImage(imgCinza)
    imgG = cv.CloneImage(imgCinza)
    imgB = cv.CloneImage(imgCinza)

    bgCinza = cv.CreateImage(cv.GetSize(bg), cv.IPL_DEPTH_8U, 1)
    bgHSV = cv.CloneImage(bg)

    bgH = cv.CloneImage(bgCinza)
    bgS = cv.CloneImage(bgCinza)
    bgV = cv.CloneImage(bgCinza)

    bgR = cv.CloneImage(bgCinza)
    bgG = cv.CloneImage(bgCinza)
    bgB = cv.CloneImage(bgCinza)

    # split the frame and the background into their HSV and RGB channels
    cv.CvtColor(frame, imgHSV, cv.CV_BGR2HSV)
    cv.Split(imgHSV, imgH, imgS, imgV, None)
    cv.Split(frame, imgR, imgG, imgB, None)

    cv.CvtColor(bg, bgHSV, cv.CV_BGR2HSV)
    cv.Split(bgHSV, bgH, bgS, bgV, None)
    cv.Split(bg, bgR, bgG, bgB, None)

    # start of the shadow-detection calculations
    ivbv = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)
    cv.Div(imgV, bgV, ivbv, 255)

    isbs = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)
    cv.Sub(imgS, bgS, isbs)

    ihbh = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)
    cv.AbsDiff(imgH, bgH, ihbh)

    # shadow detection parameters (the first set below is immediately overridden)
    alfa = 190
    beta = 210

    thrSat = 20
    thrHue = 50

    alfa = 220
    beta = 240

    thrSat = 90
    thrHue = 90

    nErode = 0
    nDilate = 0

    # process ivbv
    imgThr_ivbv = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)
    # keep only the values below beta
    cv.Threshold(ivbv, imgThr_ivbv, beta, 255, cv.CV_THRESH_TRUNC)
    # keep only the values above alfa
    cv.Threshold(imgThr_ivbv, imgThr_ivbv, alfa, 255, cv.CV_THRESH_TOZERO)
    # binarize
    cv.Threshold(imgThr_ivbv, imgThr_ivbv, alfa, 255, cv.CV_THRESH_BINARY)

    # process isbs
    imgThr_isbs = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)
    # binarize at thrSat
    cv.Threshold(isbs, imgThr_isbs, thrSat, 255, cv.CV_THRESH_BINARY)

    # process ihbh
    imgThr_ihbh = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)
    # keep only the values below thrHue
    cv.Threshold(ihbh, imgThr_ihbh, thrHue, 255, cv.CV_THRESH_BINARY_INV)

    # wherever it is black in all the images, it is shadow
    imgSombra = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)

    cv.Not(imgThr_ivbv, imgThr_ivbv)
    cv.Not(imgThr_isbs, imgThr_isbs)

    cv.And(imgThr_ivbv, imgThr_isbs, imgSombra)

    cv.Not(imgThr_ihbh, imgThr_ihbh)

    cv.And(imgSombra, imgThr_ihbh, imgSombra)

    for i in range(nErode):
        cv.Erode(imgSombra, imgSombra)

    for i in range(nDilate):
        cv.Dilate(imgSombra, imgSombra)

    if dbg:
        print 'Time to detect shadows: %.5f' % (time.time() - t1)
    # display the output frames

    # highlight the shadow in green on top of the frame
    frameDestacado = cv.CloneImage(frame)

    cv.Or(imgG, imgSombra, imgG)

    cv.Merge(imgR, imgG, imgB, None, frameDestacado)
    '''    
    cv.ShowImage('frameDestacado',frameDestacado)
    cv.WaitKey()
    '''

    retorno = {}
    retorno['sombra'] = imgSombra
    retorno['sombraDestacada'] = frameDestacado

    return retorno
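    # NOTE: everything below this return is unreachable debug/visualisation code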

    cv.ShowImage('ivbv', ivbv)
    cv.ShowImage('isbs', isbs)
    cv.ShowImage('ihbh', ihbh)

    cv.ShowImage('imgThr_isbs', imgThr_isbs)
    cv.ShowImage('imgThr_ivbv', imgThr_ivbv)
    cv.ShowImage('imgThr_ihbh', imgThr_ihbh)

    cv.ShowImage('imgSombra', imgSombra)

    cv.WaitKey()

    sys.exit()

    frameMerge = cv.CloneImage(frame)
    cv.Merge(imgR, imgR, imgR, None, frameMerge)

    cv.ShowImage('frame', frame)
    cv.ShowImage('frameMerge', frameMerge)

    cv.ShowImage('imgR', imgR)
    cv.ShowImage('imgG', imgG)
    cv.ShowImage('imgB', imgB)

    cv.ShowImage('imgH', imgH)
    cv.ShowImage('imgS', imgS)
    cv.ShowImage('imgV', imgV)

    cv.WaitKey()

    return 0
Example #25
def threshold(image,
              bg_mode,
              filter_pr2,
              crop_rect=None,
              cam_info=None,
              listener=None,
              hue_interval=(0, 180)):
    image_hsv = cv.CloneImage(image)
    cv.CvtColor(image, image_hsv, cv.CV_RGB2HSV)  #TODO: THIS SHOULD BE BGR
    image_hue = cv.CreateImage(cv.GetSize(image_hsv), 8, 1)
    image_gray = cv.CreateImage(cv.GetSize(image_hsv), 8, 1)
    cv.CvtColor(image, image_gray, cv.CV_RGB2GRAY)
    cv.Split(image_hsv, image_hue, None, None, None)
    image_thresh = cv.CloneImage(image_gray)
    hue_low = hue_interval[0]
    hue_up = hue_interval[1]
    if bg_mode == GREEN_BG:
        upper_thresh = cv.CloneImage(image_hue)
        lower_thresh = cv.CloneImage(image_hue)
        black_thresh = cv.CloneImage(image_hue)
        cv.Threshold(
            image_hue, upper_thresh, 80, 255,
            cv.CV_THRESH_BINARY)  #upper_thresh = white for all h>80, black o/w
        cv.Threshold(image_hue, lower_thresh, 40, 255, cv.CV_THRESH_BINARY_INV
                     )  #lower_thresh = white for all h<40, black o/w
        cv.Threshold(image_gray, black_thresh, 1, 255, cv.CV_THRESH_BINARY
                     )  #black_thresh = black for pure black, white o/w
        #Filter out the green band of the hue
        cv.Or(upper_thresh, lower_thresh,
              image_thresh)  #image_thresh = white for all h<40 OR h>80
        #Filter out pure black, for boundaries in birdseye
        cv.And(
            image_thresh, black_thresh, image_thresh
        )  #image_thresh = white for all non-pure-black pixels and (h<40 or h>80)

    elif bg_mode == WHITE_BG:
        cv.Threshold(image_gray, image_thresh, 250, 255,
                     cv.CV_THRESH_BINARY_INV
                     )  #image_gray = white for all non-super white, black o/w
    elif bg_mode == YELLOW_BG:
        upper_thresh = cv.CloneImage(image_hue)
        lower_thresh = cv.CloneImage(image_hue)
        black_thresh = cv.CloneImage(image_hue)
        cv.Threshold(image_hue, upper_thresh, 98, 255, cv.CV_THRESH_BINARY)
        cv.Threshold(image_hue, lower_thresh, 85, 255, cv.CV_THRESH_BINARY_INV)
        cv.Threshold(image_gray, black_thresh, 1, 255, cv.CV_THRESH_BINARY)
        #Filter out the yellow band of the hue
        cv.Or(upper_thresh, lower_thresh,
              image_thresh)  #image_thresh = white for all h<85 OR h>98
        #Filter out pure black, for boundaries in birdseye
        cv.And(
            image_thresh, black_thresh, image_thresh
        )  #image_thresh = white for all non-pure-black pixels and (h<85 or h>98)
    elif bg_mode == CUSTOM:
        upper_thresh = cv.CloneImage(image_hue)
        lower_thresh = cv.CloneImage(image_hue)
        black_thresh = cv.CloneImage(image_hue)
        cv.Threshold(image_hue, upper_thresh, hue_up, 255, cv.CV_THRESH_BINARY)
        cv.Threshold(image_hue, lower_thresh, hue_low, 255,
                     cv.CV_THRESH_BINARY_INV)
        cv.Threshold(image_gray, black_thresh, 1, 255, cv.CV_THRESH_BINARY)
        #Filter out the selected band of the hue
        cv.Or(upper_thresh, lower_thresh,
              image_thresh)  #image_thresh = white for all h outside range
        #Filter out pure black, for boundaries in birdseye
        cv.And(
            image_thresh, black_thresh, image_thresh
        )  #image_thresh = white for all non-pure-black pixels and h outside the selected range
        cv.Erode(image_thresh, image_thresh)  #Opening to remove noise
        cv.Dilate(image_thresh, image_thresh)
    #set all pixels outside the crop_rect to black
    if crop_rect:
        (x, y, width, height) = crop_rect
        for j in range(image_thresh.height):
            for i in range(x):
                image_thresh[j, i] = 0
            for i in range(x + width, image_thresh.width):
                image_thresh[j, i] = 0
        for i in range(image_thresh.width):
            for j in range(y):
                image_thresh[j, i] = 0
            for j in range(y + height, image_thresh.height):
                image_thresh[j, i] = 0

    if filter_pr2:
        #Filter out grippers
        cam_frame = cam_info.header.frame_id
        now = rospy.Time.now()
        for link in ("l_gripper_l_finger_tip_link",
                     "r_gripper_l_finger_tip_link"):
            listener.waitForTransform(cam_frame, link, now,
                                      rospy.Duration(10.0))
            l_grip_origin = PointStamped()
            l_grip_origin.header.frame_id = link
            l_grip_in_camera = listener.transformPoint(cam_frame,
                                                       l_grip_origin)
            camera_model = image_geometry.PinholeCameraModel()
            camera_model.fromCameraInfo(cam_info)
            (u, v) = camera_model.project3dToPixel(
                (l_grip_in_camera.point.x, l_grip_in_camera.point.y,
                 l_grip_in_camera.point.z))
            if link[0] == "l":
                x_range = range(0, u)
            else:
                x_range = range(u, image_thresh.width)
            if 0 < u < image_thresh.width and 0 < v < image_thresh.height:
                for x in x_range:
                    for y in range(0, image_thresh.height):
                        image_thresh[y, x] = 0.0
    save_num = 0
    cv.SaveImage("/tmp/thresholded_%d.png" % save_num, image_thresh)
    save_num = save_num + 1
    return image_thresh
Example #26
    def show(self):
        """ Process and show the current frame """
        source = cv.LoadImage(self.files[self.index])
        width, height = cv.GetSize(source)

        center = (width / 2) + self.offset

        cv.Line(source, (center, 0), (center, height), (0, 255, 0), 1)

        if self.roi:
            x, y, a, b = self.roi

            print self.roi

            width, height = ((a - x), (b - y))
            mask = cv.CreateImage((width, height), cv.IPL_DEPTH_8U, 1)

            cv.SetImageROI(source, (x, y, width, height))
            cv.Split(source, None, None, mask, None)

            gray = cv.CloneImage(mask)

            cv.InRangeS(mask, self.thresholdMin, self.thresholdMax, mask)
            cv.And(mask, gray, gray)

            line = []
            points = []

            for i in range(0, height - 1):
                row = cv.GetRow(gray, i)

                minVal, maxVal, minLoc, maxLoc = cv.MinMaxLoc(row)

                y = i
                x = maxLoc[0]
                point = (0, 0, height - i)

                if x > 0:
                    line.append((x, y))

                    s = x / sin(radians(self.camAngle))
                    x = s * cos(self.angles[self.index])
                    z = height - y
                    y = s * sin(self.angles[self.index])

                    point = (round(x, 2), round(y, 2), z)

                points.append(point)

            cv.PolyLine(source, [line], False, (255, 0, 0), 2, 8)
            cv.ResetImageROI(source)
            x, y, a, b = self.roi
            cv.Rectangle(source, (int(x), int(y)), (int(a), int(b)),
                         (255.0, 255, 255, 0))

        if self.roi:
            x, y, a, b = self.roi

            width, height = ((a - x), (b - y))
            mask = cv.CreateImage((width, height), cv.IPL_DEPTH_8U, 1)

            cv.SetImageROI(
                source, (x - width, y, width, height))  # moves roi to the left
            cv.Split(source, None, None, mask, None)

            gray = cv.CloneImage(mask)

            cv.InRangeS(mask, self.thresholdMin, self.thresholdMax, mask)
            cv.And(mask, gray, gray)

            line = []
            points2 = []

            for i in range(0, height - 1):
                row = cv.GetRow(gray, i)

                minVal, maxVal, minLoc, maxLoc = cv.MinMaxLoc(row)

                y = i
                x = maxLoc[0]
                point = (0, 0, height - i)

                if x > 0:
                    line.append((x, y))

                    x = width - x
                    # left to the x-axis

                    s = x / sin(radians(self.camAngle))

                    x = s * cos(self.angles[self.index])
                    z = height - y  # 500 higher than the other.
                    y = s * sin(self.angles[self.index])

                    a = radians(300)

                    nx = (cos(a) * x) - (sin(a) * y)
                    ny = (sin(a) * x) + (cos(a) * y)

                    point = (nx, ny, z)

                points2.append(point)

            cv.PolyLine(source, [line], False, (255, 0, 0), 2, 8)
            cv.ResetImageROI(source)
            x, y, a, b = self.roi
            cv.Rectangle(source, (int(x), int(y)), (int(a), int(b)),
                         (255.0, 255, 255, 0))

        if self.mode == 'mask':
            cv.ShowImage('preview', mask)
            return

        if self.mode == 'record' and self.roi:
            font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 0.5, 0.5, 1)
            cv.PutText(source, "recording %d" % self.index, (20, 20), font,
                       (0, 0, 255))
            self.points.extend(points)
            self.points2.extend(points2)
            #self.colors.extend(colors);

        cv.ShowImage('preview', source)
Example #27
def main():

    # create windows
    create_and_position_window('Thresholded_HSV_Image', 10, 10)
    create_and_position_window('RGB_VideoFrame', 10 + cam_width, 10)

    create_and_position_window('Hue', 10, 10 + cam_height)
    create_and_position_window('Saturation', 210, 10 + cam_height)
    create_and_position_window('Value', 410, 10 + cam_height)
    create_and_position_window('LaserPointer', 0, 0)

    capture = setup_camera_capture()

    # create images for the different channels
    h_img = cv.CreateImage((cam_width, cam_height), 8, 1)
    s_img = cv.CreateImage((cam_width, cam_height), 8, 1)
    v_img = cv.CreateImage((cam_width, cam_height), 8, 1)
    laser_img = cv.CreateImage((cam_width, cam_height), 8, 1)
    cv.SetZero(h_img)
    cv.SetZero(s_img)
    cv.SetZero(v_img)
    cv.SetZero(laser_img)

    while True:
        # 1. capture the current image
        frame = cv.QueryFrame(capture)
        if frame is None:
            # no image captured... end the processing
            break

        hsv_image = cv.CloneImage(frame)  # temporary copy of the frame
        cv.CvtColor(frame, hsv_image, cv.CV_BGR2HSV)  # convert to HSV

        # split the video frame into color channels
        cv.Split(hsv_image, h_img, s_img, v_img, None)

        # Threshold ranges of HSV components.
        cv.InRangeS(h_img, hmin, hmax, h_img)
        cv.InRangeS(s_img, smin, smax, s_img)
        cv.InRangeS(v_img, vmin, vmax, v_img)

        # Perform an AND on HSV components to identify the laser!
        cv.And(h_img, v_img, laser_img)
        # This actually Worked OK for me without using Saturation.
        #cv.cvAnd(laser_img, s_img,laser_img)

        # Merge the HSV components back together.
        cv.Merge(h_img, s_img, v_img, None, hsv_image)

        #-----------------------------------------------------
        # NOTE: default color space in OpenCV is BGR!!
        # we can now display the images
        cv.ShowImage('Thresholded_HSV_Image', hsv_image)
        cv.ShowImage('RGB_VideoFrame', frame)
        cv.ShowImage('Hue', h_img)
        cv.ShowImage('Saturation', s_img)
        cv.ShowImage('Value', v_img)
        cv.ShowImage('LaserPointer', laser_img)

        # handle events
        k = cv.WaitKey(10)

        if k in (27, ord('q')):  # WaitKey returns an integer key code
            # user pressed the ESC or q key, so exit
            break
Example #28
        for i in range(len(hist)-1):
            cur_value  = hist[i]
            next_value = hist[i + 1]
            
            # still walking up the hill
            if cur_value > hist[end]:
                end = i

            # arrived in a valley
            if ((cur_value < perc * hist[end]) & (1.1 * cur_value < next_value)):

                # cut out a certain depth layer
                cv.Threshold(for_thresh, min_thresh, bins[start], 255, cv.CV_THRESH_BINARY)
                cv.Threshold(for_thresh, max_thresh, bins[i], 255, cv.CV_THRESH_BINARY_INV)
                cv.And(min_thresh, max_thresh, and_thresh)
                
                # erode the layer and find contours
                if erode:
                    cv.Erode(and_thresh, and_thresh, elem)
                conts = cv.FindContours(and_thresh, storage, cv.CV_RETR_EXTERNAL, cv.CV_CHAIN_APPROX_SIMPLE)
                
                # collect all interesting contours in a list
                while conts:
                    if draw_cluster:
                        cv.FillPoly(contours, [conts], color_tab[c])

                    if len(conts) > cont_length and min_cont_area < cv.ContourArea(conts) < max_cont_area:
                        conts_list.append(list(conts))
                    conts = conts.h_next()
                
Example #29
    circles = np.asarray(storage)
    print len(circles), 'circles found'
    for circle in circles:
        Radius, x, y = int(circle[0][2]), int(circle[0][0]), int(circle[0][1])
        cv.Circle(output, (x, y), 1, cv.CV_RGB(0, 255, 0), -1, 8, 0)
        cv.Circle(output, (x, y), Radius, cv.CV_RGB(255, 0, 0), 3, 8, 0)


#split image into RGB components
cv.Split(orig, rrr, ggg, bbb, None)
#process each component
channel_processing(rrr)
channel_processing(ggg)
channel_processing(bbb)
#combine images using logical 'And' to avoid saturation
cv.And(rrr, ggg, rrr)
cv.And(rrr, bbb, processed)
cv.ShowImage('before canny', processed)
# cv.SaveImage('case3_processed.jpg',processed)
#use canny, as HoughCircles seems to prefer ring like circles to filled ones.
cv.Canny(processed, processed, 5, 70, 3)
#smooth to reduce noise a bit more
cv.Smooth(processed, processed, cv.CV_GAUSSIAN, 7, 7)
cv.ShowImage('processed', processed)
#find circles, with parameter search
storage = find_circles(processed, storage, 100)
draw_circles(storage, output)
# show images
cv.ShowImage("original with circles", output)
cv.SaveImage('case1.jpg', output)
def and_planes(planes):
    assert len(planes) > 0
    res = planes[0]
    for plane in planes[1:]:
        cv.And(plane, res, res)
    return res
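
A short usage sketch for and_planes with the cv2/NumPy API, assuming equally sized 8-bit masks:

import cv2
import numpy as np

def and_planes_np(planes):
    assert len(planes) > 0
    res = planes[0].copy()
    for plane in planes[1:]:
        res = cv2.bitwise_and(res, plane)
    return res

masks = [np.full((4, 4), 255, np.uint8), np.eye(4, dtype=np.uint8) * 255]
print(and_planes_np(masks))  # only the diagonal stays at 255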