예제 #1
0
def repaintCCs(image, doRepaint=None, returnMask=False, resizeMask=True, doFillBackground=True, bgPoint=(0, 0), newcol=255, connectivity=4):
    """Flood-fill every connected component of image and repaint selected ones.

    Each component is discovered with cv.FloodFill in mask-only mode;
    doRepaint decides, from the component tuple and its seed pixel value,
    whether the component is added to the result mask.

    Parameters:
        image -- IplImage to scan (repainted in place unless returnMask)
        doRepaint -- callable(comp, col) -> bool; comp is the tuple returned
                     by cv.FloodFill (comp[2] is the bounding rect).
                     Defaults to repainting nothing.
        returnMask -- if True, return the mask instead of repainting image
        resizeMask -- if True (with returnMask), crop off the 1-pixel
                     flood-fill border before returning
        doFillBackground -- pre-mark the component containing bgPoint as
                     visited so the background is never considered
        bgPoint -- seed point of the background component
        newcol -- color painted over selected components
        connectivity -- 4 or 8, passed to cv.FloodFill

    Returns the repainted image, or the mask when returnMask is True.
    """
    if doRepaint is None:
        doRepaint = lambda comp, col: False
    # Flood-fill masks must be 2 pixels larger than the image in each axis.
    resultMask = cv.CreateImage((image.width + 2, image.height + 2), image.depth, image.nChannels)
    tempMask = cv.CreateImage((image.width + 2, image.height + 2), image.depth, image.nChannels)
    visitMask = cv.CreateImage((image.width + 2, image.height + 2), image.depth, image.nChannels)
    cv.Zero(resultMask)
    cv.Zero(tempMask)
    cv.Zero(visitMask)
    if doFillBackground:
        # Mark the background component as already visited (mask-only fill).
        cv.FloodFill(image, bgPoint, 0, 0, 0, connectivity + cv.CV_FLOODFILL_MASK_ONLY + (255 << 8), visitMask)
    for x in xrange(image.width):
        for y in xrange(image.height):
            # Mask coordinates are offset by the 1-pixel border.
            if visitMask[y + 1, x + 1] == 255:
                continue
            comp = cv.FloodFill(image, (x, y), 0, 0, 0, connectivity + cv.CV_FLOODFILL_MASK_ONLY + (255 << 8), tempMask)
            # comp[2] is the component's bounding rect, shifted into mask coords.
            region = shiftRect(comp[2], 1, 1)
            cv.SetImageROI(tempMask, region)
            cv.SetImageROI(visitMask, region)
            # Accumulate this component into the visited set.
            cv.Or(tempMask, visitMask, visitMask)
            if doRepaint(comp, image[y, x]):
                cv.SetImageROI(resultMask, region)
                cv.Or(tempMask, resultMask, resultMask)
                cv.ResetImageROI(resultMask)
            # Zero while the ROI is still set: only the written region is cleared.
            cv.Zero(tempMask)
            cv.ResetImageROI(tempMask)
            cv.ResetImageROI(visitMask)
    if returnMask:
        if resizeMask: return cap.getSubImage(resultMask, (1, 1, image.width, image.height))
        else: return resultMask
    else:    
        cv.SetImageROI(resultMask, (1, 1, image.width, image.height))
        cv.Set(image, newcol, resultMask)
        return image
예제 #2
0
def removeLightColors(image):
    """Whiten every pixel that is brighter than 154 in any channel.

    The image is modified in place and also returned.
    """
    planes = [cv.CreateImage(cv.GetSize(image), image.depth, 1)
              for _ in range(3)]
    cv.Split(image, planes[0], planes[1], planes[2], None)
    # Binarize each channel: 255 where the channel value exceeds 154.
    for plane in planes:
        cv.Threshold(plane, plane, 154, 255, cv.CV_THRESH_BINARY)
    # OR the three channel masks together into the first plane.
    mask = planes[0]
    cv.Or(mask, planes[1], mask)
    cv.Or(mask, planes[2], mask)
    cv.Set(image, cv.ScalarAll(255), mask)
    return image
예제 #3
0
def main(args):
    if args.output_directory:
        directory = args.output_directory
    else:
        directory = os.path.dirname(args.input_image)
        print "No output directory specified. Defaulting to %s" % directory
    if not os.path.exists(directory):
        os.makedirs(directory)
    if args.output_prefix:
        prefix = args.output_prefix
        extension = os.path.splitext(os.path.basename(args.input_image))[1]
    else:
        prefix, extension = os.path.splitext(os.path.basename(
            args.input_image))
        print "No output prefix selected. Defaulting to %s" % prefix
    output_file = "%s/%s%s" % (directory, prefix, extension)

    image = cv.LoadImage(args.input_image)
    input_image = cv.LoadImage(args.input_image)
    mask = cv.CreateImage(cv.GetSize(input_image), 8, 1)
    image_hue = cv.CreateImage(cv.GetSize(input_image), 8, 1)

    image_hsv = cv.CreateImage(cv.GetSize(input_image), 8, 3)

    cv.CvtColor(input_image, image_hsv, cv.CV_BGR2HSV)
    cv.Split(image_hsv, image_hue, None, None, None)
    upper_thresh = cv.CreateImage(cv.GetSize(input_image), 8, 1)
    lower_thresh = cv.CreateImage(cv.GetSize(input_image), 8, 1)

    cv.Threshold(image_hue, upper_thresh, args.hue_max, 255,
                 cv.CV_THRESH_BINARY)
    cv.Threshold(image_hue, lower_thresh, args.hue_min, 255,
                 cv.CV_THRESH_BINARY_INV)
    cv.Or(upper_thresh, lower_thresh, mask)
    cv.SaveImage(output_file, mask)
def backgroundDiff(img, Imask):
    """Write a foreground mask of img into Imask and return it.

    Each of the three channels is tested against a per-channel background
    range [Ilow, Ihi]; the in-range masks are OR-ed and then inverted, so
    foreground pixels end up 255.

    NOTE(review): depends on module-level globals not defined in this
    chunk (Iscratch, Igray1-3, Ilow1-3, Ihi1-3, Imaskt) -- presumably
    initialized by an accompanying statistics-model step; verify.
    """
    # Convert/copy img into the scratch buffer, then split its channels.
    cv.CvtScale(img, Iscratch, 1, 0)
    cv.Split(Iscratch, Igray1, Igray2, Igray3, None)
    cv.InRange(Igray1, Ilow1, Ihi1, Imask)

    cv.InRange(Igray2, Ilow2, Ihi2, Imaskt)
    cv.Or(Imask, Imaskt, Imask)

    cv.InRange(Igray3, Ilow3, Ihi3, Imaskt)
    cv.Or(Imask, Imaskt, Imask)

    # Invert: in-background pixels -> 0, everything else -> 255.
    cv.SubRS(Imask, 255, Imask)
    # Debug dump of the mask (hard-coded path).
    cv.SaveImage('/home/mkillpack/Desktop/mask.png', Imask)
    #cv.Erode(Imask, Imask)
    print "here is the sum of the non-zero pixels", cv.Sum(Imask)
    return Imask
예제 #5
0
def removeBadBackground(seg):
    """Erase flood-filled components of seg seeded at value-96 pixels that
    do not match the expected pattern.

    For every unvisited pixel of value 96, the matching component of the
    binarized image is flood-filled; within its bounding rect, if no pixel
    of seg equals the (OrS-tagged) mask, the masked area of seg is cleared.

    NOTE(review): cv.OrS sets even non-component mask pixels in the ROI to
    1, so the final cv.Set clears the whole bounding rect rather than only
    the component -- verify this is the intended behavior.
    """
    threshUp = cv.CreateImage(cv.GetSize(seg), cv.IPL_DEPTH_8U, 1)
    comparison = cv.CreateImage(cv.GetSize(seg), cv.IPL_DEPTH_8U, 1)
    visitMask = cv.CreateImage(cv.GetSize(seg), cv.IPL_DEPTH_8U, 1)
    # Flood-fill masks must be 2 pixels larger than the filled image.
    ffMask = cv.CreateImage((seg.width + 2, seg.height + 2), cv.IPL_DEPTH_8U,
                            1)
    cv.Threshold(seg, threshUp, 1, 255, cv.CV_THRESH_BINARY)
    cv.Zero(visitMask)
    cv.Zero(ffMask)
    for x in xrange(seg.width):
        for y in xrange(seg.height):
            if seg[y, x] != 96 or visitMask[y, x] == 255: continue
            # Mask-only fill: the component is painted with 255 into ffMask.
            comp = cv.FloodFill(threshUp, (x, y), 0, 0, 0,
                                4 + cv.CV_FLOODFILL_MASK_ONLY + (255 << 8),
                                ffMask)
            rect = comp[2]
            # Work only inside the component's bounding rect
            # (shifted by 1 for the flood-fill border).
            cv.SetImageROI(ffMask, cap.shiftRect(rect, 1, 1))
            cv.OrS(ffMask, 1, ffMask)
            cv.SetImageROI(seg, rect)
            cv.SetImageROI(comparison, rect)
            cv.Cmp(
                seg, ffMask, comparison,
                cv.CV_CMP_EQ)  # 'comparison' does not need to be zeroed later
            intersect = cv.CountNonZero(comparison)
            cv.SetImageROI(visitMask, rect)
            # Record the component's pixels as visited.
            cv.Or(visitMask, ffMask, visitMask)
            cv.ResetImageROI(visitMask)
            if intersect == 0:
                cv.Set(seg, 0, ffMask)
            # Clear while the ROI is set -- only the touched region is zeroed.
            cv.Zero(ffMask)
            cv.ResetImageROI(seg)
            cv.ResetImageROI(ffMask)
    return seg
예제 #6
0
def findFirstColorPattern(img, pattern):
    """Build per-color masks for the three pattern colors and OR them.

    pattern holds three colors; channel j of the (smoothed) BGR split is
    matched against pattern[i][2 - j] with a +/-25 tolerance.  Shows each
    intermediate mask and the combined result, then returns the latter.
    """
    sz = cv.GetSize(img)
    raw = [cv.CreateImage(sz, 8, 1) for _ in range(3)]       # b, g, r
    channels = [cv.CreateImage(sz, 8, 1) for _ in range(3)]  # smoothed b, g, r
    cv.Split(img, raw[0], raw[1], raw[2], None)
    # Gaussian-smooth each channel before the range tests.
    for j in range(3):
        cv.Smooth(raw[j], channels[j], cv.CV_GAUSSIAN, 3, 3, 0)
    dests = [cv.CreateImage(sz, 8, 1) for _ in range(4)]
    result = []
    for i in range(3):
        # Channel j is compared against pattern component 2 - j.
        for j in range(3):
            ref = pattern[i][2 - j]
            cv.InRangeS(channels[j], ref - 25, ref + 25, dests[j])
        cv.And(dests[0], dests[1], dests[3])
        combined = cv.CreateImage(sz, 8, 1)
        cv.And(dests[2], dests[3], combined)
        result.append(combined)

    cv.ShowImage("result0", result[0])
    cv.WaitKey(0)
    cv.ShowImage("result1", result[1])
    cv.WaitKey(0)
    cv.ShowImage("result2", result[2])
    cv.WaitKey(0)
    cv.Or(result[0], result[1], dests[0])
    cv.Or(dests[0], result[2], dests[3])
    cv.NamedWindow("result", cv.CV_WINDOW_AUTOSIZE)
    cv.ShowImage("result", dests[3])
    cv.WaitKey(0)
    return dests[3]
예제 #7
0
def get_filtered_plane(plane, filter_ranges, ranges_func):
    """Return a mask of pixels falling inside any of filter_ranges.

    Each (start, stop) pair is first mapped through ranges_func, and the
    per-range InRangeS masks are OR-ed together.
    """
    accumulated = image_empty_clone(plane)
    for lo, hi in filter_ranges:
        lo, hi = ranges_func(lo, hi)  #0.3, 1.0
        current = image_empty_clone(plane)
        cv.InRangeS(plane, lo, hi, current)
        cv.Or(accumulated, current, accumulated)
    return accumulated
예제 #8
0
    def handle_clicked_nonbuilding(self, x, y):
        """Handle a map click that did not land on a building.

        Finds the nearby buildings (or falls back to the single nearest
        one), advances the FROM/TO click state machine, then highlights
        the equivalent-class region and the buildings in state-dependent
        colors, and draws the click coordinates.
        """
        def equivalent_region_nearest(nearest_building):
            ''' find the equivalent class region via nearest building
            '''
            equ_class_region = cv.CreateImage(
                (self.map_img.width, self.map_img.height), cv.IPL_DEPTH_8U, 1)
            bid = nearest_building.bid
            # Pixels of empty_img labeled with this building id form the region.
            cv.CmpS(self.buildingMgr.empty_img, bid, equ_class_region,
                    cv.CV_CMP_EQ)
            return equ_class_region

        def draw_region(equ_class_region, color=im.color.red):
            # Fill every contour of the region mask onto the display image.
            c = im.find_contour(equ_class_region)
            while c:
                cv.FillPoly(self.show_img, [list(c)], color)
                c = c.h_next()

        blds = self.buildingMgr.get_near_buildings(x, y)
        if not blds:
            blds = [self.buildingMgr.get_nearest_building(x, y)]
        # update clicked states
        if self.click_state == 'START':
            self.click_state = 'FROM_CLICKED'
        elif self.click_state == 'FROM_CLICKED':
            # Only advance to TO_CLICKED when a different building set was clicked.
            if self.last_clicked != set(blds):
                self.click_state = 'TO_CLICKED'
        else:  # click_state == 'TO_CLICKED'
            self.click_state = 'FROM_CLICKED'

        # Region and building fill colors depend on the new click state.
        reg_color = {
            'FROM_CLICKED': im.color.red,
            'TO_CLICKED': im.color.blue
        }[self.click_state]
        fill_bld_color = {
            'FROM_CLICKED': im.color.pink,
            'TO_CLICKED': im.color.lightblue
        }[self.click_state]

        # equivalent class region
        equ_class_region = self.buildingMgr.get_near_region_mask(blds)
        if len(blds) == 1:
            # Single building: also merge in its labeled region from empty_img.
            cv.Or(equ_class_region, equivalent_region_nearest(blds[0]),
                  equ_class_region)
        draw_region(equ_class_region, color=reg_color)
        # fill nearby buildings
        for bd in blds:
            bd.fillme(self.show_img, fill_bld_color)
        self.generate_position_desc(blds)

        im.drawtext(self.show_img,
                    "(%g, %g, 0)" % (x, y),
                    x + 10,
                    y + 10,
                    font=self.font,
                    color=self.fontcolor)
        self.last_clicked = set(blds)
예제 #9
0
def main(args):
    image = cv.LoadImage(args.image)
    prefix, extension = os.path.splitext(args.image)
    output_path = "%s.mask.png" % (prefix)
    if os.path.exists(output_path):
        mask = cv.LoadImage(output_path, cv.CV_LOAD_IMAGE_GRAYSCALE)
    else:
        mask = cv.CreateImage((image.width, image.height), 8, 1)
        cv.Set(mask, 255)
    if args.threshold:
        print "Thresholding"
        image_hue = cv.CreateImage(cv.GetSize(image), 8, 1)
        image_sat = cv.CreateImage(cv.GetSize(image), 8, 1)
        image_val = cv.CreateImage(cv.GetSize(image), 8, 1)

        image_hsv = cv.CreateImage(cv.GetSize(image), 8, 3)

        mask_hue = cv.CreateImage((image.width, image.height), 8, 1)
        mask_val = cv.CreateImage((image.width, image.height), 8, 1)
        mask_new = cv.CreateImage((image.width, image.height), 8, 1)
        cv.CvtColor(image, image_hsv, cv.CV_BGR2HSV)
        cv.Split(image_hsv, image_hue, image_sat, image_val, None)
        upper_thresh_hue = cv.CreateImage(cv.GetSize(image), 8, 1)
        lower_thresh_hue = cv.CreateImage(cv.GetSize(image), 8, 1)
        upper_thresh_val = cv.CreateImage(cv.GetSize(image), 8, 1)
        lower_thresh_val = cv.CreateImage(cv.GetSize(image), 8, 1)
        cv.Threshold(image_hue, upper_thresh_hue, args.hue_max, 255,
                     cv.CV_THRESH_BINARY)
        cv.Threshold(image_hue, lower_thresh_hue, args.hue_min, 255,
                     cv.CV_THRESH_BINARY_INV)
        cv.Threshold(image_val, upper_thresh_val, 235, 255,
                     cv.CV_THRESH_BINARY)
        cv.Threshold(image_val, lower_thresh_val, 30, 255,
                     cv.CV_THRESH_BINARY_INV)
        cv.Or(upper_thresh_hue, lower_thresh_hue, mask_hue)
        cv.Or(upper_thresh_val, lower_thresh_val, mask_val)
        cv.Or(mask_hue, mask_val, mask_new)
        cv.And(mask, mask_new, mask)

    MaskWindow(image, mask, output_path, args.brush_size, args.zoom_out)
예제 #10
0
def joinComponents(components):
    """Merge several connected components into one.

    Each component is a list/tuple (area, value, rect, mask).  Returns a
    component of the same shape whose rect is the union bounding box and
    whose mask is the OR of the individual masks.
    """
    boxes = [c[2] for c in components]
    left = min(b[0] for b in boxes)
    top = min(b[1] for b in boxes)
    right = max(b[0] + b[2] for b in boxes)
    bottom = max(b[1] + b[3] for b in boxes)
    merged = cv.CreateImage((right - left, bottom - top), cv.IPL_DEPTH_8U, 1)
    cv.Zero(merged)
    for c in components:
        # Paste each component mask at its offset inside the union box.
        target = cv.GetSubRect(merged, shiftRect(c[2], -left, -top))
        cv.Or(c[3], target, target, None)
    total_area = sum(c[0] for c in components)
    return [total_area, 255.0,
            (left, top, right - left, bottom - top), merged]
예제 #11
0
    def getAllThresh(self, imgs):
        # open 1 image to get the size
        img = cv.LoadImage(imgs[0])
        allThresh = cv.CreateImage(cv.GetSize(img), 8, 1)

        for i in imgs:
            print "processing", i
            threshed = self.getThreshold(i)
            tmp = cv.CreateImage(cv.GetSize(img), 8, 1)
            cv.Copy(allThresh, tmp)
            cv.Or(tmp, threshed, allThresh)
        savename = '/'.join(imgs[0].split('/')[:-1]) + '/Thresholded.png'
        cv.SaveImage(savename, allThresh)
        return savename
예제 #12
0
    def calc_stats(self):
        """Build the background-noise model from the recorded noise frames.

        For each consecutive pair of self.background_noise frames, both are
        back-projected through the HS histogram; the first is accumulated
        into the background model, and the difference (img2 - img1) is
        OR-ed into self.avg_noise.  Finally createModelsfromStats derives
        the hi/low background thresholds, which are displayed.

        NOTE(review): interactive -- blocks on cv.WaitKey(-1) every
        iteration and opens several debug windows.
        """
        cv.NamedWindow("noise", cv.CV_WINDOW_AUTOSIZE)
        cv.NamedWindow("img1_back", cv.CV_WINDOW_AUTOSIZE)
        cv.NamedWindow("img2_back", cv.CV_WINDOW_AUTOSIZE)
        self.check_for_hist()
        self.avg_noise = cv.CreateImage(cv.GetSize(self.background_noise[0]),
                                        8, 1)
        cv.Zero(self.avg_noise)

        for i in xrange(len(self.background_noise) - 1):
            cv.ShowImage("noise", self.avg_noise)
            back_proj_img1, hist1 = self.back_project_hs(
                self.background_noise[i])
            back_proj_img2, hist2 = self.back_project_hs(
                self.background_noise[i + 1])

            self.accumulateBackground(back_proj_img1)

            cv.ShowImage("img1_back", back_proj_img1)
            cv.ShowImage("img2_back", back_proj_img2)
            scratch = cv.CreateImage(cv.GetSize(back_proj_img2), 8, 1)
            scratch2 = cv.CreateImage(cv.GetSize(back_proj_img2), 8, 1)

            # do something clever with ands ors and diffs
            cv.Zero(scratch)
            cv.Zero(scratch2)
            cv.Sub(back_proj_img2, back_proj_img1,
                   scratch2)  #noise, but includes object if failed,

            #cv.Sub(scratch2, self.avg_noise, scratch)
            #cv.Or(self.avg_noise, scratch2, self.avg_noise)

            # Accumulate the frame-to-frame difference as noise.
            cv.Or(self.avg_noise, scratch2, self.avg_noise)

            cv.ShowImage("diff_back", scratch2)
            cv.ShowImage("diff_noise_scratch", scratch)

            cv.WaitKey(-1)
        self.createModelsfromStats()
        print self.Icount
        cv.NamedWindow("Ilow", cv.CV_WINDOW_AUTOSIZE)
        cv.NamedWindow("Ihi", cv.CV_WINDOW_AUTOSIZE)
        cv.NamedWindow("IavgF", cv.CV_WINDOW_AUTOSIZE)

        cv.ShowImage("Ihi", self.IhiF)
        cv.ShowImage("Ilow", self.IlowF)
        cv.ShowImage("IavgF", self.IavgF)
예제 #13
0
 def getThreshold(self, image):
     """Return a binary mask of the pink/red pixels in the image file.

     Hue wraps around in OpenCV's 0-179 HSV space, so red is matched as
     two bands (0-20 and 160-179) whose masks are OR-ed together.
     """
     #http://www.aishack.in/2010/07/tracking-colored-objects-in-opencv/
     #http://nashruddin.com/OpenCV_Region_of_Interest_(ROI)
     img = cv.LoadImage(image)
     hsv = cv.CreateImage(cv.GetSize(img), 8, 3)
     cv.CvtColor(img, hsv, cv.CV_BGR2HSV)
     low_band = cv.CreateImage(cv.GetSize(img), 8, 1)
     high_band = cv.CreateImage(cv.GetSize(img), 8, 1)
     cv.InRangeS(hsv, cv.Scalar(0, 50, 50), cv.Scalar(20, 255, 255),
                 low_band)
     cv.InRangeS(hsv, cv.Scalar(160, 50, 50), cv.Scalar(179, 255, 255),
                 high_band)
     combined = cv.CreateImage(cv.GetSize(img), 8, 1)
     cv.Or(low_band, high_band, combined)
     return combined
예제 #14
0
파일: corner.py 프로젝트: mroja/eyetracker
    def get_candidates(self, m_d):
        '''
        Get candidates for this corner from new image
        @param m_d: marker_detector
        @return: list of candidate corners; empty when the corner is too
            wide or its search rectangle falls outside the image
        '''
        # if this corner is wider then MAX_CORNER_ANGLE, we probably won't
        # find it anyway. Instead lets find narrow corners and calculate its
        # position
        if self.angle > MAX_CORNER_ANGLE: return []
        cr = self.get_rectangle(m_d)
        cr = correct_rectangle(cr, m_d.size)
        if cr is None: return []
        # All work below happens inside the corner's search rectangle.
        m_d.set_ROI(cr)
        tmp_img = m_d.tmp_img
        gray_img = m_d.gray_img
        bw_img = m_d.bw_img
        canny = m_d.canny_img
        cv.Copy(gray_img, tmp_img)
        # Otsu threshold and Canny edges, OR-ed so contours follow both cues.
        cv.Threshold(gray_img, bw_img, 125, 255, cv.CV_THRESH_OTSU)
        if self.black_inside > 0:
            cv.Not(bw_img, bw_img)
        cv.Canny(gray_img, canny, 300, 500)
        cv.Or(bw_img, canny, bw_img)
        # tmpim aliases m_d.canny_img -- the Canny output is overwritten here.
        tmpim = m_d.canny_img
        cv.Copy(bw_img, tmpim)
        # NOTE(review): purpose of marking pixel (1,1) before FindContours
        # is unclear from this chunk -- verify.
        cv.Set2D(tmpim, 1, 1, 255)
        conts = cv.FindContours(tmpim, cv.CreateMemStorage(),
                                cv.CV_RETR_EXTERNAL)
        cv.Zero(tmpim)
        m_d.set_ROI()
        cv.SetImageROI(tmpim, cr)
        result = []
        while conts:
            # Polygon-approximate each contour and harvest candidates from it.
            aconts = cv.ApproxPoly(conts, cv.CreateMemStorage(),
                                   cv.CV_POLY_APPROX_DP, 2)
            nconts = list(aconts)
            cv.PolyLine(tmpim, [nconts], True, (255, 255, 255))
            self._append_candidates_from_conts(cr, result, nconts, m_d)
            conts = conts.h_next()


#        print result
#        db.show([tmpim,m_d.draw_img], 'tmpim', 0, 0, 0)
        return result
예제 #15
0
def filterImage(im):
    """Return a binary mask of the red pixels in im.

    Red hue wraps around in HSV space, so two ranges are tested and
    OR-ed together.  Relies on the module-level `size` matching the
    dimensions of `im` (presumably set at capture time -- verify).
    """
    hsvFrame = cv.CreateImage(size, cv.IPL_DEPTH_8U, 3)
    # Renamed from `filter`/`filter2`: `filter` shadows the builtin.
    maskLow = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)
    maskHigh = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)

    # Lower red band (hue ~0-12) and upper red band (hue ~170+).
    hsvMin1 = cv.Scalar(0, 90, 130, 0)
    hsvMax1 = cv.Scalar(12, 256, 256, 0)

    hsvMin2 = cv.Scalar(170, 90, 130, 0)
    hsvMax2 = cv.Scalar(200, 256, 256, 0)

    # Color detection using HSV
    cv.CvtColor(im, hsvFrame, cv.CV_BGR2HSV)
    cv.InRangeS(hsvFrame, hsvMin1, hsvMax1, maskLow)
    cv.InRangeS(hsvFrame, hsvMin2, hsvMax2, maskHigh)
    cv.Or(maskLow, maskHigh, maskLow)
    return maskLow
예제 #16
0
    def threshold(self, thres_low, thres_high, thres_chan):
        """Combine dark-region and bright-region thresholds of thres_chan.

        thres_chan is binarized twice (at thres_low and thres_high); each
        result is dilated and stripped of blobs larger than self.max_area,
        then the two masks are OR-ed into self.thresholded_combined,
        which is returned.
        """
        result_val = 1  #Change result_val to 255 if need to view image

        cv.Threshold(thres_chan, self.thresholded_low, thres_low, result_val,
                     cv.CV_THRESH_BINARY)
        cv.Dilate(
            self.thresholded_low, self.thresholded_low
        )  #thresholded_low thresholded image using threshold for dark regions
        blob.remove_large_blobs(self.thresholded_low, self.max_area)

        cv.Threshold(thres_chan, self.thresholded_high, thres_high, result_val,
                     cv.CV_THRESH_BINARY)
        cv.Dilate(
            self.thresholded_high, self.thresholded_high
        )  #thresholded_high thresholded image using threshold for bright regions
        blob.remove_large_blobs(self.thresholded_high,
                                self.max_area)  #, show=True)

        cv.Or(self.thresholded_low, self.thresholded_high,
              self.thresholded_combined)
        return self.thresholded_combined
예제 #17
0
def preprocess(image, addr, extras):
    """Run the image clean-up pipeline and return the processed image.

    Steps: drop light colors, remap colors, two noise-smoothing passes,
    inpaint over noisy / unknown-color pixels, sharpen colors, and finally
    repaint tiny connected components.  Every intermediate result is
    recorded via cap.logger and handed to cap.processExtras for the
    PREPROCESS stage.
    """
    log = cap.logger(extras, image)
    image = removeLightColors(image)
    log.log(image)
    image = remapColors(image)
    log.log(image)
    image = smoothNoise2(image)
    log.log(image)
    image = cap.smoothNoise1(image)
    log.log(image)
    # Mask = noisy pixels plus pixels of the special color `myunkn`.
    mask = getNoiseMask(image, 15, 4)
    cv.Or(mask, findColor(image, myunkn), mask)
    log.log(mask)
    image = doInpaint(image, mask)
    log.log(image)
    image = sharpenColors(image)
    log.log(image)
    # Repaint components with area <= 5 or a bounding box thinner than 3px.
    image = cap.repaintCCs(image,
                           doRepaint=lambda comp, col: comp[0] <= 5 or comp[2][
                               2] <= 2 or comp[2][3] <= 2)
    log.log(image)
    cap.processExtras(log.steps, addr, extras, cap.CAP_STAGE_PREPROCESS)
    return image
예제 #18
0
    def init_empty_img(self):
        """Build the label image for map pixels outside every near-region.

        First ORs together the near-region polygons of all buildings, then
        labels every pixel outside that union with the bid of its nearest
        building.  Returns the resulting single-channel image.
        """
        # calculate Union{ near_regions } for all building
        nearby_region_polys = [bd.near_region_poly for bd in self.buildings]
        all_near_regions = cv.CreateImage(
            (self.label_img.width, self.label_img.height), cv.IPL_DEPTH_8U, 1)
        # cv.CreateImage leaves pixel data uninitialized; zero it so the Or
        # accumulation below starts from a clean slate.  (The original also
        # pre-filled poly[0] here redundantly -- the loop covers it.)
        cv.Zero(all_near_regions)
        for poly in nearby_region_polys:
            tmp_canvas = cv.CreateImage(
                (self.label_img.width, self.label_img.height), cv.IPL_DEPTH_8U,
                1)
            # Same reason: FillPoly paints only the polygon's pixels.
            cv.Zero(tmp_canvas)
            cv.FillPoly(tmp_canvas, [poly], im.color.blue)
            cv.Or(tmp_canvas, all_near_regions, all_near_regions)

        # find the "empty" region
        empty_region = cv.CreateImage(
            (self.label_img.width, self.label_img.height), cv.IPL_DEPTH_8U, 1)
        cv.CmpS(all_near_regions, 0, empty_region, cv.CV_CMP_EQ)

        # Replace each empty-region pixel with its nearest building's bid.
        for ele in it.nonzero_indices(cv.GetMat(empty_region)):
            y, x = ele
            y, x = int(y), int(x)
            nearest_bd = self.get_nearest_building(x, y)
            cv.Set2D(empty_region, y, x, nearest_bd.bid)
        return empty_region
예제 #19
0
파일: ball.py 프로젝트: NoSobh/naovita
class RobotVision:
    """Webcam red-ball tracker.

    NOTE(review): the body below is script code placed directly inside the
    class statement, guarded by ``if __name__ == '__main__':``.  It runs
    once when the class statement executes (module run as a script) and
    then loops forever; the class defines no methods or instances.
    """
    #size = None
    #cvImage hsv_frame, thresholded, thresholded2
    #cvScalar hsv_min, hsv_max, hsv_min2, hsv_max2
    #cvCapture capture;

    if __name__ == '__main__':

        #globals size,  hsv_frame, thresholded, thresholded2, hsv_min, hsv_max, hsv_min2, hsv_max2, capture
        print "Initializing ball Tracking"
        # NOTE(review): the legacy cv Python API usually takes plain
        # (width, height) tuples; cv.Size may not exist -- verify.
        size = cv.Size(640, 480)
        hsv_frame = cv.CreateImage(size, cv.IPL_DEPTH_8U, 3)
        thresholded = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)
        thresholded2 = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)

        # Red wraps around the hue axis, hence two HSV ranges.
        hsv_min = cv.Scalar(0, 50, 170, 0)
        hsv_max = cv.Scalar(10, 180, 256, 0)
        hsv_min2 = cv.Scalar(170, 50, 170, 0)
        hsv_max2 = cv.Scalar(256, 180, 256, 0)

        storage = cv.CreateMemStorage(0)

        # start capturing form webcam
        capture = cv.CreateCameraCapture(0)

        if not capture:
            print "Could not open webcam"
            sys.exit(1)

        #CV windows
        cv.NamedWindow("Camera", cv.CV_WINDOW_AUTOSIZE)

        #globals size,  hsv_frame, thresholded, thresholded2, hsv_min, hsv_max, hsv_min2, hsv_max2, capture
        while True:
            # get a frame from the webcam
            frame = cv.QueryFrame(capture)

            if frame is not None:
                # convert to HSV for color matching
                # as hue wraps around, we need to match it in 2 parts and OR together
                cv.CvtColor(frame, hsv_frame, cv.CV_BGR2HSV)
                cv.InRangeS(hsv_frame, hsv_min, hsv_max, thresholded)
                cv.InRangeS(hsv_frame, hsv_min2, hsv_max2, thresholded2)
                cv.Or(thresholded, thresholded2, thresholded)

                # pre-smoothing improves Hough detector
                cv.Smooth(thresholded, thresholded, cv.CV_GAUSSIAN, 9, 9)
                circles = cv.HoughCircles(thresholded, storage,
                                          cv.CV_HOUGH_GRADIENT, 2,
                                          thresholded.height / 4, 100, 40, 20,
                                          200)

                # find largest circle
                maxRadius = 0
                x = 0
                y = 0
                found = False
                for i in range(circles.total):
                    circle = circles[i]
                    if circle[2] > maxRadius:
                        found = True
                        maxRadius = circle[2]
                        x = circle[0]
                        y = circle[1]

                cv.ShowImage("Camera", frame)

                if found:
                    print "ball detected at position:", x, ",", y, " with radius:", maxRadius

                else:
                    print "no ball"
예제 #20
0
def DetectaSombra(frame, bg):
    """Detect shadow pixels in frame relative to background image bg.

    Compares the V-channel ratio, the S-channel difference and the
    H-channel absolute difference between frame and bg; pixels passing
    all three threshold tests are marked as shadow.

    Returns a dict with:
        'sombra'          -- binary shadow mask
        'sombraDestacada' -- frame with the shadow highlighted

    NOTE(review): everything after the first ``return retorno`` below is
    unreachable debug/display code.
    """

    dbg = 1

    if dbg:
        t1 = time.time()

    print 'Detectando sombras na imagem...'

    # Create the grayscale / per-channel work images for the frame.
    imgCinza = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)
    imgHSV = cv.CloneImage(frame)

    imgH = cv.CloneImage(imgCinza)
    imgS = cv.CloneImage(imgCinza)
    imgV = cv.CloneImage(imgCinza)

    # NOTE(review): frame is presumably BGR (OpenCV default), so the Split
    # below yields B->imgR, G->imgG, R->imgB; the names are misleading.
    imgR = cv.CloneImage(imgCinza)
    imgG = cv.CloneImage(imgCinza)
    imgB = cv.CloneImage(imgCinza)

    bgCinza = cv.CreateImage(cv.GetSize(bg), cv.IPL_DEPTH_8U, 1)
    bgHSV = cv.CloneImage(bg)

    bgH = cv.CloneImage(bgCinza)
    bgS = cv.CloneImage(bgCinza)
    bgV = cv.CloneImage(bgCinza)

    bgR = cv.CloneImage(bgCinza)
    bgG = cv.CloneImage(bgCinza)
    bgB = cv.CloneImage(bgCinza)

    # Split frame and background into their HSV and RGB channel planes.
    cv.CvtColor(frame, imgHSV, cv.CV_BGR2HSV)
    cv.Split(imgHSV, imgH, imgS, imgV, None)
    cv.Split(frame, imgR, imgG, imgB, None)

    cv.CvtColor(bg, bgHSV, cv.CV_BGR2HSV)
    cv.Split(bgHSV, bgH, bgS, bgV, None)
    cv.Split(bg, bgR, bgG, bgB, None)

    # Start of the shadow-detection computations.
    # Ratio of frame V to background V, scaled by 255.
    ivbv = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)
    cv.Div(imgV, bgV, ivbv, 255)

    # Saturation difference: frame - background.
    isbs = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)
    cv.Sub(imgS, bgS, isbs)

    # Absolute hue difference between frame and background.
    ihbh = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)
    cv.AbsDiff(imgH, bgH, ihbh)

    # Shadow-detection parameters.
    # NOTE(review): this first set is dead -- immediately overwritten below.
    alfa = 190
    beta = 210

    thrSat = 20
    thrHue = 50

    alfa = 220
    beta = 240

    thrSat = 90
    thrHue = 90

    nErode = 0
    nDilate = 0

    # Process ivbv (V ratio): keep alfa < ivbv < beta.
    imgThr_ivbv = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)
    # Clip values above beta.
    cv.Threshold(ivbv, imgThr_ivbv, beta, 255, cv.CV_THRESH_TRUNC)
    # Zero values below alfa.
    cv.Threshold(imgThr_ivbv, imgThr_ivbv, alfa, 255, cv.CV_THRESH_TOZERO)
    # Binarize.
    cv.Threshold(imgThr_ivbv, imgThr_ivbv, alfa, 255, cv.CV_THRESH_BINARY)

    # Process isbs (saturation difference): white where isbs > thrSat.
    imgThr_isbs = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)
    cv.Threshold(isbs, imgThr_isbs, thrSat, 255, cv.CV_THRESH_BINARY)

    # Process ihbh (hue difference): white where ihbh < thrHue.
    # (The original comment mislabeled this block as isbs.)
    imgThr_ihbh = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)
    cv.Threshold(ihbh, imgThr_ihbh, thrHue, 255, cv.CV_THRESH_BINARY_INV)

    # A pixel is shadow where all three masks agree (after the inversions below).
    imgSombra = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)

    cv.Not(imgThr_ivbv, imgThr_ivbv)
    cv.Not(imgThr_isbs, imgThr_isbs)

    cv.And(imgThr_ivbv, imgThr_isbs, imgSombra)

    cv.Not(imgThr_ihbh, imgThr_ihbh)

    cv.And(imgSombra, imgThr_ihbh, imgSombra)

    # Optional morphological cleanup (disabled: nErode = nDilate = 0).
    for i in range(nErode):
        cv.Erode(imgSombra, imgSombra)

    for i in range(nDilate):
        cv.Dilate(imgSombra, imgSombra)

    if dbg:
        print 'Tempo para detectar sombras: %.5f' % (time.time() - t1)
    # Display output frames.

    # Highlight the shadow on the frame by saturating one channel.
    # NOTE(review): imgG is channel 1 of the split -- confirm which color
    # this actually brightens given the BGR naming issue above.
    frameDestacado = cv.CloneImage(frame)

    cv.Or(imgG, imgSombra, imgG)

    cv.Merge(imgR, imgG, imgB, None, frameDestacado)
    '''    
    cv.ShowImage('frameDestacado',frameDestacado)
    cv.WaitKey()
    '''

    retorno = {}
    retorno['sombra'] = imgSombra
    retorno['sombraDestacada'] = frameDestacado

    return retorno

    # --- unreachable debug code below (kept as-is) ---
    cv.ShowImage('ivbv', ivbv)
    cv.ShowImage('isbs', isbs)
    cv.ShowImage('ihbh', ihbh)

    cv.ShowImage('imgThr_isbs', imgThr_isbs)
    cv.ShowImage('imgThr_ivbv', imgThr_ivbv)
    cv.ShowImage('imgThr_ihbh', imgThr_ihbh)

    cv.ShowImage('imgSombra', imgSombra)

    cv.WaitKey()

    sys.exit()

    frameMerge = cv.CloneImage(frame)
    cv.Merge(imgR, imgR, imgR, None, frameMerge)

    cv.ShowImage('frame', frame)
    cv.ShowImage('frameMerge', frameMerge)

    cv.ShowImage('imgR', imgR)
    cv.ShowImage('imgG', imgG)
    cv.ShowImage('imgB', imgB)

    cv.ShowImage('imgH', imgH)
    cv.ShowImage('imgS', imgS)
    cv.ShowImage('imgV', imgV)

    cv.WaitKey()

    return 0
예제 #21
0
def threshold(image,
              bg_mode,
              filter_pr2,
              crop_rect=None,
              cam_info=None,
              listener=None,
              hue_interval=(0, 180)):
    """Binarize `image` so that foreground is white and background black.

    The hue channel is band-filtered according to the known background
    color (`bg_mode`). Optionally, pixels outside `crop_rect` and pixels
    in the columns covered by the PR2 grippers are forced to black.

    Args:
        image: 3-channel IplImage (treated as RGB; see TODO below).
        bg_mode: one of GREEN_BG, WHITE_BG, YELLOW_BG, CUSTOM (module
            constants) selecting which hue band counts as background.
        filter_pr2: when True, blank out the gripper columns; requires
            `cam_info` and `listener`.
        crop_rect: optional (x, y, width, height); pixels outside are zeroed.
        cam_info: CameraInfo message (used only when filter_pr2 is True).
        listener: tf listener (used only when filter_pr2 is True).
        hue_interval: (low, high) hue band treated as background when
            bg_mode == CUSTOM.

    Returns:
        Single-channel binary IplImage (white = foreground).
    """
    image_hsv = cv.CloneImage(image)
    cv.CvtColor(image, image_hsv, cv.CV_RGB2HSV)  #TODO: THIS SHOULD BE BGR
    image_hue = cv.CreateImage(cv.GetSize(image_hsv), 8, 1)
    image_gray = cv.CreateImage(cv.GetSize(image_hsv), 8, 1)
    cv.CvtColor(image, image_gray, cv.CV_RGB2GRAY)
    cv.Split(image_hsv, image_hue, None, None, None)
    image_thresh = cv.CloneImage(image_gray)
    hue_low = hue_interval[0]
    hue_up = hue_interval[1]
    if bg_mode == GREEN_BG:
        upper_thresh = cv.CloneImage(image_hue)
        lower_thresh = cv.CloneImage(image_hue)
        black_thresh = cv.CloneImage(image_hue)
        # upper_thresh = white for all h > 80, black otherwise
        cv.Threshold(image_hue, upper_thresh, 80, 255, cv.CV_THRESH_BINARY)
        # lower_thresh = white for all h < 40, black otherwise
        cv.Threshold(image_hue, lower_thresh, 40, 255,
                     cv.CV_THRESH_BINARY_INV)
        # black_thresh = black for pure black, white otherwise
        cv.Threshold(image_gray, black_thresh, 1, 255, cv.CV_THRESH_BINARY)
        # Filter out the green band of the hue:
        # image_thresh = white for all h < 40 OR h > 80
        cv.Or(upper_thresh, lower_thresh, image_thresh)
        # Filter out pure black, for boundaries in birdseye
        cv.And(image_thresh, black_thresh, image_thresh)

    elif bg_mode == WHITE_BG:
        # White background: everything that is not near-white is foreground.
        cv.Threshold(image_gray, image_thresh, 250, 255,
                     cv.CV_THRESH_BINARY_INV)
    elif bg_mode == YELLOW_BG:
        upper_thresh = cv.CloneImage(image_hue)
        lower_thresh = cv.CloneImage(image_hue)
        black_thresh = cv.CloneImage(image_hue)
        cv.Threshold(image_hue, upper_thresh, 98, 255, cv.CV_THRESH_BINARY)
        cv.Threshold(image_hue, lower_thresh, 85, 255, cv.CV_THRESH_BINARY_INV)
        cv.Threshold(image_gray, black_thresh, 1, 255, cv.CV_THRESH_BINARY)
        # Filter out the yellow band of the hue:
        # image_thresh = white for all h < 85 OR h > 98
        cv.Or(upper_thresh, lower_thresh, image_thresh)
        # Filter out pure black, for boundaries in birdseye
        cv.And(image_thresh, black_thresh, image_thresh)
    elif bg_mode == CUSTOM:
        upper_thresh = cv.CloneImage(image_hue)
        lower_thresh = cv.CloneImage(image_hue)
        black_thresh = cv.CloneImage(image_hue)
        cv.Threshold(image_hue, upper_thresh, hue_up, 255, cv.CV_THRESH_BINARY)
        cv.Threshold(image_hue, lower_thresh, hue_low, 255,
                     cv.CV_THRESH_BINARY_INV)
        cv.Threshold(image_gray, black_thresh, 1, 255, cv.CV_THRESH_BINARY)
        # Filter out the selected band of the hue:
        # image_thresh = white for all h outside [hue_low, hue_up]
        cv.Or(upper_thresh, lower_thresh, image_thresh)
        # Filter out pure black, for boundaries in birdseye
        cv.And(image_thresh, black_thresh, image_thresh)
        # Opening (erode then dilate) to remove speckle noise.
        cv.Erode(image_thresh, image_thresh)
        cv.Dilate(image_thresh, image_thresh)
    # Set all pixels outside the crop_rect to black.
    if crop_rect:
        (x, y, width, height) = crop_rect
        for j in range(image_thresh.height):
            for i in range(x):
                image_thresh[j, i] = 0
            for i in range(x + width, image_thresh.width):
                image_thresh[j, i] = 0
        for i in range(image_thresh.width):
            for j in range(y):
                image_thresh[j, i] = 0
            for j in range(y + height, image_thresh.height):
                image_thresh[j, i] = 0

    if filter_pr2:
        # Filter out the grippers: project each fingertip into the image and
        # blank every column left of the left gripper / right of the right.
        cam_frame = cam_info.header.frame_id
        now = rospy.Time.now()
        for link in ("l_gripper_l_finger_tip_link",
                     "r_gripper_l_finger_tip_link"):
            listener.waitForTransform(cam_frame, link, now,
                                      rospy.Duration(10.0))
            l_grip_origin = PointStamped()
            l_grip_origin.header.frame_id = link
            l_grip_in_camera = listener.transformPoint(cam_frame,
                                                       l_grip_origin)
            camera_model = image_geometry.PinholeCameraModel()
            camera_model.fromCameraInfo(cam_info)
            (u, v) = camera_model.project3dToPixel(
                (l_grip_in_camera.point.x, l_grip_in_camera.point.y,
                 l_grip_in_camera.point.z))
            # BUGFIX: project3dToPixel returns floats; range() requires
            # integer bounds, so cast before building the column range.
            u = int(u)
            v = int(v)
            if link[0] == "l":
                x_range = range(0, u)
            else:
                x_range = range(u, image_thresh.width)
            if 0 < u < image_thresh.width and 0 < v < image_thresh.height:
                for x in x_range:
                    for y in range(0, image_thresh.height):
                        image_thresh[y, x] = 0.0
    # Debug snapshot. The old per-call counter was a dead local (reset to 0
    # on every call), so this always overwrote the same file anyway.
    cv.SaveImage("/tmp/thresholded_0.png", image_thresh)
    return image_thresh
예제 #22
0
    def find_better_point(self, from_, direction, predicted_length, range=20):
        '''
        Try to find a better corner arm: walk from `from_` along
        `direction` and return the last visible point on that line.

        @param from_: (x, y) starting point (the corner candidate)
        @param direction: vector pointing along the expected side
        @param predicted_length: predicted length of this side
        @param range: half-angle (degrees) of the search cone around
            `direction`; NOTE: shadows the builtin `range`
        @return: the improved absolute point, or None when no matching
            corner arm was found (the candidate was not a real corner)
        '''
        img = self.bw_img
        timg = self.tmp_img
        # Search up to 20% beyond the predicted side length.
        L1 = predicted_length * 1.2
        vec = direction
        L2 = length(vec)
        vec = add((0, 0), vec, L1 / L2) #vector towards direction of length of old side
        # Two edges of the search cone, +/- `range` degrees around `vec`.
        vec1 = rotateVec(vec, d2r(range))
        vec2 = rotateVec(vec, d2r(-range))
        x, y = from_
        cv.ResetImageROI(img)
        size = cv.GetSize(img)

        # Bounding rectangle of the search cone (with a small margin),
        # clipped to the image; all further work happens inside this ROI.
        border_points = [add(from_, vec1), add(from_, vec2), (x - 5, y - 5), (x + 5, y + 5)]
        (x, y, wx, wy) = cv.BoundingRect(border_points)
        crect = correct_rectangle((x - 3, y - 3, wx + 6, wy + 6), size)
        [cv.SetImageROI(i, crect) for i in [img, timg, self.gray_img]]
        self.bounds.extend(cvrect(crect))
        # Combine an Otsu threshold with Canny edges into one binary image.
        cv.Threshold(self.gray_img, img, 125, 255, cv.CV_THRESH_OTSU)
        cv.Not(img, timg)
        cv.Canny(self.gray_img, img, 300, 500)
        cv.Or(img, timg,timg)
        rect = cvrect(crect)
        cv.Set2D(timg, 1, 1, (30, 30, 30))
        conts = cv.FindContours(timg, cv.CreateMemStorage(), cv.CV_RETR_EXTERNAL)
        db.DrawContours(timg, conts, (255, 255, 255), (128, 128, 128), 10)
        cv.Zero(timg)
        # `fr` is from_ expressed in ROI-local coordinates.
        fr = add(from_, rect[0], -1)
        ans = []
        while conts:
            # Simplify each contour, then for every polygon vertex that lies
            # (almost) on from_, collect the directions towards both of its
            # neighbours — these are candidate side directions.
            cont = cv.ApproxPoly(conts, cv.CreateMemStorage(), cv.CV_POLY_APPROX_DP, parameter=2, parameter2=0)
            cv.DrawContours(timg, cont, (255, 255, 255), (128, 128, 128), 10)
            cont = list(cont)
            L = len(cont)
            for i, p in enumerate(cont):
                if length(vector(fr, p)) < 5:
                    prev = cont[(i - 1 + L) % L]
                    next = cont[(i + 1) % L]
                    ans.append(vector(fr, prev))
                    ans.append(vector(fr, next))
            conts = conts.h_next()
        [cv.ResetImageROI(i) for i in [self.gray_img, timg, img]]
        if len(ans) == 0:
            # we didn't find corresponding corner,
            # that means it wasn't really a corner
            return None
        if len(ans) == 2 and ans[0] == ans[1]: return add(from_, direction)
        # Pick the candidate whose direction is closest to the expected one.
        min = math.pi  # NOTE: shadows the builtin `min`
        min_index = 0
        for i, v in enumerate(ans):
            tmp = vectorAngle(vec, v)
            if tmp < min:
                min = tmp
                min_index = i

        ans = ans[min_index]

        if length(ans)+1< L2:
            # the point we just found is closer then the previous one
            return add(from_,direction)

        abs_point = add(from_, ans)
        if point_on_edge(abs_point, crect):
            # The found point sits on the ROI border, so the real side may
            # continue beyond it: retry with doubled length, narrower cone.
            # NOTE(review): abs_point (a point) is passed as `direction` in
            # the recursive call — verify that is intentional.
            if not point_on_edge(abs_point, (0, 0, size[0], size[1])):
                if range < 20:
                    # this is recurence call. When we are here it means that
                    # side is longer then expected by over 2 times - it is not
                    # the side we are looking for- corner is not valid
                    return None
                else:
                    return self.find_better_point(from_, abs_point,
                                                  predicted_length * 2, 5)

        return abs_point
예제 #23
0
    # Tail of the test-case builder loop (header above this chunk):
    # mr is the marker's half-diagonal, used to space rows vertically;
    # one test entry per 10-degree rotation step.
    mr = r * math.sqrt(2)
    y += mr * 1.8
    test += [(str(deg) + "abcdefgh"[j], (50 + deg * 11, y),
              math.pi * deg / 180, r) for deg in range(0, 90, 10)]

# Render each test Data Matrix onto the background `bg` at its pose.
for (msg, (x, y), angle, r) in test:
    map = cv.CreateMat(2, 3, cv.CV_32FC1)
    # Corners of the rotated square; only the first three feed the affine
    # transform below.
    # NOTE(review): the fourth angle is 3*pi/4 where 3*pi/2 would complete
    # the square — harmless since corners[3] is never used, but verify.
    corners = [(x + r * math.cos(angle + th), y + r * math.sin(angle + th))
               for th in [0, math.pi / 2, math.pi, 3 * math.pi / 4]]
    src = mkdmtx(msg)
    (sx, sy) = cv.GetSize(src)
    # Map the source image's corner triangle onto the target triangle.
    cv.GetAffineTransform([(0, 0), (sx, 0), (sx, sy)], corners[:3], map)
    temp = cv.CreateMat(bg.rows, bg.cols, cv.CV_8UC3)
    cv.Set(temp, cv.RGB(0, 0, 0))
    cv.WarpAffine(src, temp, map)
    cv.Or(temp, bg, bg)

cv.ShowImage("comp", bg)
scribble = cv.CloneMat(bg)

# Optional benchmark loop (disabled).
if 0:
    for i in range(10):
        df.find(bg)

# Outline every detected Data Matrix on the scribble image.
for (sym, coords) in df.find(bg).items():
    print sym
    cv.PolyLine(scribble, [coords],
                1,
                cv.CV_RGB(255, 0, 0),
                1,
                lineType=cv.CV_AA)
예제 #24
0
            # Fragment of a larger loop (enclosing scope above this chunk):
            # maintains a running noise model (avg_noise) from two
            # back-projection images. Reset scratch buffers for this frame.
            cv.Zero(scratch)
            cv.Zero(scratch2)


            #idea is to have a background model from back_proj_img2, or at least an emtpy single shot
            ###cv.Sub(back_proj_img, back_proj_img2, scratch)


            #cv.SubRS(back_proj_img, 255, scratch)
            ###cv.SubRS(back_proj_img2, 255, scratch2)
            #cv.Sub(back_proj_img, back_proj_img2, scratch2) #opposite noise, but excludes object 
            cv.Sub(back_proj_img2, back_proj_img, scratch2) #noise, but includes object if failed, 
                                                            #would need to learn before then update selectively 
                                                            #Maybe want both added in the end. 
            # scratch = current difference minus the learned noise model;
            # avg_noise accumulates (ORs in) everything seen so far.
            cv.Sub(scratch2, avg_noise, scratch)            
            cv.Or(avg_noise, scratch2, avg_noise)

            ##adding this part fills in wherever the object has been too, heatmaps?
            #cv.Sub(back_proj_img2, back_proj_img, scratch)
            #cv.Or(avg_noise, scratch, avg_noise)
            #


            #cv.Sub(back_proj_img2, avg_noise, back_proj_img2)
            #cv.Sub(scratch,, back_proj_img2)
            cv.ShowImage("final", scratch)
            #cv.Sub(scratch, avg_noise, scratch2)



            #cv.And(scratch, back_proj_img2, scratch2)
예제 #25
0
    def get_projector_line_associations(self):
        """Scan the structured-light patterns and associate camera pixels
        with projector scanlines.

        Captures one photo per gray-code pattern and per inverted pattern,
        thresholds their differences, accumulates the per-pixel gray code,
        decodes it to a binary scanline number, and invalidates pixels that
        were ambiguous or decode out of range.

        Returns:
            CV_32SC1 matrix (camera height x width) with the projector
            scanline number per pixel, or -1 where decoding was unreliable.
        """
        rospy.loginfo("Scanning...")
        positives = []
        negatives = []
        # One photo per pattern and one per inverted pattern.
        for i in range(self.number_of_projection_patterns):
            positives.append(
                self.get_picture_of_projection(
                    self.predistorted_positive_projections[i]))
            negatives.append(
                self.get_picture_of_projection(
                    self.predistorted_negative_projections[i]))

        rospy.loginfo("Thresholding...")
        # strike_sum counts, per pixel, how often it was "too close to call".
        strike_sum = cv.CreateMat(self.camera_info.height,
                                  self.camera_info.width, cv.CV_32SC1)
        cv.SetZero(strike_sum)
        # gray_codes accumulates one bit per pattern into a gray-code word.
        gray_codes = cv.CreateMat(self.camera_info.height,
                                  self.camera_info.width, cv.CV_32SC1)
        cv.SetZero(gray_codes)
        for i in range(self.number_of_projection_patterns):
            # Signed and absolute difference between pattern and inverse.
            difference = cv.CreateMat(self.camera_info.height,
                                      self.camera_info.width, cv.CV_8UC1)
            cv.Sub(positives[i], negatives[i], difference)

            absolute_difference = cv.CreateMat(self.camera_info.height,
                                               self.camera_info.width,
                                               cv.CV_8UC1)
            cv.AbsDiff(positives[i], negatives[i], absolute_difference)

            #Mark all the pixels that were "too close to call" and add them to the running total
            strike_mask = cv.CreateMat(self.camera_info.height,
                                       self.camera_info.width, cv.CV_8UC1)
            cv.CmpS(absolute_difference, self.threshold, strike_mask,
                    cv.CV_CMP_LT)
            strikes = cv.CreateMat(self.camera_info.height,
                                   self.camera_info.width, cv.CV_32SC1)
            cv.Set(strikes, 1, strike_mask)
            cv.Add(strikes, strike_sum, strike_sum)

            #Set the corresponding bit in the gray_code
            bit_mask = cv.CreateMat(self.camera_info.height,
                                    self.camera_info.width, cv.CV_8UC1)
            cv.CmpS(difference, 0, bit_mask, cv.CV_CMP_GT)
            bit_values = cv.CreateMat(self.camera_info.height,
                                      self.camera_info.width, cv.CV_32SC1)
            cv.Set(bit_values, 2**i, bit_mask)
            cv.Or(bit_values, gray_codes, gray_codes)

        rospy.loginfo("Decoding...")
        # Decode every gray code into binary by repeatedly XOR-ing with a
        # right-shifted copy (shift amounts halve each iteration).
        projector_line_associations = cv.CreateMat(self.camera_info.height,
                                                   self.camera_info.width,
                                                   cv.CV_32SC1)
        cv.Copy(gray_codes, projector_line_associations)
        # NOTE(review): the loop's upper bound reuses the CV_MAT_DEPTH type
        # constant of the matrix as the highest shift exponent — confirm
        # this is intentional and covers all pattern bits.
        for i in range(
                cv.CV_MAT_DEPTH(cv.GetElemType(projector_line_associations)),
                -1, -1):
            projector_line_associations_bitshifted_right = cv.CreateMat(
                self.camera_info.height, self.camera_info.width, cv.CV_32SC1)
            #Using ConvertScale with a shift of -0.5 to do integer division for bitshifting right
            cv.ConvertScale(projector_line_associations,
                            projector_line_associations_bitshifted_right,
                            (2**-(2**i)), -0.5)
            cv.Xor(projector_line_associations,
                   projector_line_associations_bitshifted_right,
                   projector_line_associations)

        rospy.loginfo("Post processing...")

        # Remove all pixels that were "too close to call" more than once
        strikeout_mask = cv.CreateMat(self.camera_info.height,
                                      self.camera_info.width, cv.CV_8UC1)
        cv.CmpS(strike_sum, 1, strikeout_mask, cv.CV_CMP_GT)
        cv.Set(projector_line_associations, -1, strikeout_mask)

        # Remove all pixels that don't decode to a valid scanline number
        invalid_scanline_mask = cv.CreateMat(self.camera_info.height,
                                             self.camera_info.width,
                                             cv.CV_8UC1)
        cv.InRangeS(projector_line_associations, 0, self.number_of_scanlines,
                    invalid_scanline_mask)
        cv.Not(invalid_scanline_mask, invalid_scanline_mask)
        cv.Set(projector_line_associations, -1, invalid_scanline_mask)

        self.display_scanline_associations(projector_line_associations)

        return projector_line_associations
예제 #26
0
def process_image(image_color, name='unnamed.jpg'):
    """Find the biggest circle-shaped contour in `image_color` and cut it out.

    Pipeline: blur -> mask out red-ish hues in HSV -> binarize (Otsu) ->
    find external contours -> match each against the contour of
    template.jpg with cv.MatchShapes -> pick the widest acceptable match
    and hand it to cut_it_out(). A debug rendering is always written to
    contours/<name>.

    Args:
        image_color: BGR image (numpy array, cv2 style).
        name: basename used for the debug image written to contours/.

    Returns:
        Whatever cut_it_out() returns for the best contour, or
        (None, None) when no circle-like contour was found.
    """
    #image_color = cv2.resize(image_color, (1280, 848))
    image_color = cv2.blur(image_color, (3, 3))

    image_hsv = array2cv(cv2.cvtColor(image_color, cv.CV_BGR2HSV))

    mask1 = cv.CreateImage(cv.GetSize(image_hsv), 8, 1)
    mask2 = cv.CreateImage(cv.GetSize(image_hsv), 8, 1)
    mask_both = cv.CreateImage(cv.GetSize(image_hsv), 8, 1)
    demasked = cv.CreateImage(cv.GetSize(image_hsv), 8, 3)
    cv.Rectangle(demasked, (0, 0), cv.GetSize(image_hsv),
                 cv.CV_RGB(255, 255, 255), cv.CV_FILLED)

    # Select two saturated hue bands, then keep everything NOT in either
    # band (the selected pixels are replaced by the white background).
    cv.InRangeS(image_hsv, cv.Scalar(0, 64, 100), cv.Scalar(150, 255, 255),
                mask1)
    cv.InRangeS(image_hsv, cv.Scalar(150, 64, 100), cv.Scalar(180, 255, 255),
                mask2)
    cv.Or(mask1, mask2, mask_both)
    cv.Not(mask_both, mask_both)
    cv.Copy(array2cv(image_color), demasked, mask_both)

    # Round-trip through a temporary PNG to convert the legacy IplImage
    # back into a cv2 numpy array.
    _, tmp = tempfile.mkstemp('.png')
    cv.SaveImage(tmp, demasked)
    demasked = cv2.imread(tmp)
    os.unlink(tmp)

    image = cv2.cvtColor(demasked, cv.CV_RGB2GRAY)
    image = cv2.equalizeHist(image)
    #image = cv2.blur(image, (3, 3))
    image = cv2.dilate(image, None, iterations=2)
    _, image = cv2.threshold(image, 80, 255,
                             cv.CV_THRESH_BINARY + cv.CV_THRESH_OTSU)
    h, w = image.shape

    # Acceptance criteria for candidate contours.
    MIN_AREA = 100
    MIN_WIDTH = w * 0.1
    MAX_WIDTH = w * 0.5
    EPSILON = 0.1
    TEMPLATE = cv.LoadImage('template.jpg', 0)

    storage = cv.CreateMemStorage()
    TEMPLATE_CONTOURS = cv.FindContours(TEMPLATE,
                                        storage,
                                        mode=cv.CV_RETR_EXTERNAL,
                                        method=cv.CV_CHAIN_APPROX_NONE,
                                        offset=(0, 0))

    storage = cv.CreateMemStorage()
    im_cv = array2cv(image)
    im_cv_c = array2cv(cv2.cvtColor(image, cv.CV_GRAY2RGB))

    # Invert so the shapes of interest become white blobs.
    cv.XorS(im_cv, cv.Scalar(255, 0, 0, 0), im_cv, None)

    contours = cv.FindContours(im_cv,
                               storage,
                               mode=cv.CV_RETR_EXTERNAL,
                               method=cv.CV_CHAIN_APPROX_NONE,
                               offset=(0, 0))
    # BUGFIX: initialize BEFORE the `if contours:` block — previously a
    # frame with no contours raised NameError at the final check below.
    biggestCircle = None
    BC_cnt = None
    if contours:
        cv.DrawContours(im_cv, contours, (0, 0, 0), (0, 0, 0), 7, -1)

        while contours:
            area = cv.ContourArea(contours)

            # Skip specks.
            if area < MIN_AREA:
                contours = contours.h_next()
                continue

            storage2 = cv.CreateMemStorage(0)
            hull = cv.ConvexHull2(contours, storage2, cv.CV_CLOCKWISE, 1)
            if hull:
                cv.PolyLine(im_cv_c, [hull], 1, cv.RGB(0, 255, 0), 4, cv.CV_AA)

                # Axis-aligned bounding box of the convex hull.
                xmax, xmin, ymax, ymin = 0, w, 0, h
                for x, y in list(hull):
                    xmax = max(xmax, x)
                    ymax = max(ymax, y)
                    xmin = min(xmin, x)
                    ymin = min(ymin, y)

                cv.Rectangle(im_cv_c, (xmin, ymin), (xmax, ymax),
                             cv.RGB(0, 255, 255), 4)

                height = (ymax - ymin)
                width = (xmax - xmin)
                # Reject contours that are implausibly narrow or wide.
                if width > MAX_WIDTH or width < MIN_WIDTH:
                    contours = contours.h_next()
                    continue

                # Shape similarity to the circle template (smaller = closer).
                diff = cv.MatchShapes(contours, TEMPLATE_CONTOURS,
                                      cv.CV_CONTOURS_MATCH_I3)
                if diff < EPSILON:
                    cv.DrawContours(im_cv_c, contours, (255, 0, 0),
                                    (255, 0, 0), 0, -1)
                    # Keep the widest acceptable match.
                    if not biggestCircle or width > biggestCircle[2]:
                        biggestCircle = xmin, ymin, width, height
                        BC_cnt = contours

            contours = contours.h_next()

    if biggestCircle:
        cv.DrawContours(im_cv_c, BC_cnt, (255, 0, 255), (255, 0, 255), 0, -1)
        cv.SaveImage('contours/' + name, im_cv_c)
        return cut_it_out(image, *biggestCircle, name=name)

    cv.SaveImage('contours/' + name, im_cv_c)
    return None, None
예제 #27
0
    def image_callback(self, data):
        """Per-frame ROS image callback.

        Converts the incoming image message to OpenCV, lazily allocates the
        working images on the first frame, runs self.process_image(),
        overlays the marker image, draws CPS/RES statistics, optionally
        shows the result in a window and republishes it as a bgr8 message.
        Each iteration is timed to maintain a rolling cycles-per-second
        estimate.
        """
        start = rospy.Time.now()
        """ Convert the raw image to OpenCV format using the convert_image() helper function """
        cv_image = self.convert_image(data)
        """ Some webcams invert the image """
        if self.flip_image:
            cv.Flip(cv_image)
        """ Create a few images we will use for display """
        if not self.image:
            # First frame: allocate all working images at the camera size.
            self.image_size = cv.GetSize(cv_image)
            self.image = cv.CreateImage(self.image_size, 8, 3)
            self.marker_image = cv.CreateImage(self.image_size, 8, 3)
            self.display_image = cv.CreateImage(self.image_size, 8, 3)
            self.processed_image = cv.CreateImage(self.image_size, 8, 3)
            cv.Zero(self.marker_image)
        """ Copy the current frame to the global image in case we need it elsewhere"""
        cv.Copy(cv_image, self.image)

        if not self.keep_marker_history:
            cv.Zero(self.marker_image)
        """ Process the image to detect and track objects or features """
        processed_image = self.process_image(cv_image)
        """ If the result is a greyscale image, convert to 3-channel for display purposes """
        if processed_image.channels == 1:
            cv.CvtColor(processed_image, self.processed_image, cv.CV_GRAY2BGR)
        else:
            cv.Copy(processed_image, self.processed_image)
        """ Display the user-selection rectangle or point."""
        self.display_markers()

        if self.night_mode:
            """ Night mode: only display the markers """
            cv.SetZero(self.processed_image)
        """ Merge the processed image and the marker image """
        cv.Or(self.processed_image, self.marker_image, self.display_image)
        # TODO Draw the images on the rectangle
        # if self.track_box:
        #     if self.auto_face_tracking:
        #         cv.EllipseBox(self.display_image, self.track_box, cv.CV_RGB(255, 0, 0), 2)
        #     else:
        #         (center, size, angle) = self.track_box
        #         pt1 = (int(center[0] - size[0] / 2), int(center[1] - size[1] / 2))
        #         pt2 = (int(center[0] + size[0] / 2), int(center[1] + size[1] / 2))
        #
        #         cv.Rectangle(self.display_image, pt1, pt2, cv.RGB(255, 0, 0), 2, 8, 0)
        #
        # elif self.detect_box:
        #     (pt1_x, pt1_y, w, h) = self.detect_box
        #     cv.Rectangle(self.display_image, (pt1_x, pt1_y), (pt1_x + w, pt1_y + h), cv.RGB(255, 0, 0), 2, 8, 0)
        """ Handle keyboard events """
        self.keystroke = cv.WaitKey(5)

        # Rolling CPS average over the last cps_n_values frames.
        # NOTE(review): int(1.0 / duration) would raise ZeroDivisionError on
        # a zero-length frame — confirm whether that can happen here.
        duration = rospy.Time.now() - start
        duration = duration.to_sec()
        fps = int(1.0 / duration)
        self.cps_values.append(fps)
        if len(self.cps_values) > self.cps_n_values:
            self.cps_values.pop(0)
        self.cps = int(sum(self.cps_values) / len(self.cps_values))

        if self.show_text:
            # Scale the overlay font with the image resolution.
            hscale = 0.2 * self.image_size[0] / 160. + 0.1
            vscale = 0.2 * self.image_size[1] / 120. + 0.1
            text_font = cv.InitFont(cv.CV_FONT_VECTOR0, hscale, vscale, 0, 1,
                                    8)
            """ Print cycles per second (CPS) and resolution (RES) at top of the image """
            if self.image_size[0] >= 640:
                vstart = 25
                voffset = int(50 + self.image_size[1] / 120.)
            elif self.image_size[0] == 320:
                vstart = 15
                voffset = int(35 + self.image_size[1] / 120.)
            else:
                vstart = 10
                voffset = int(20 + self.image_size[1] / 120.)
            cv.PutText(self.display_image, "CPS: " + str(self.cps),
                       (10, vstart), text_font, cv.RGB(255, 255, 0))
            cv.PutText(
                self.display_image, "RES: " + str(self.image_size[0]) + "X" +
                str(self.image_size[1]), (10, voffset), text_font,
                cv.RGB(255, 255, 0))

        if not self.headless:
            # Now display the image.
            cv.ShowImage(self.cv_window_name, self.display_image)
        """ Publish the display image back to ROS """
        try:
            """ Convertion for cv2 is needed """
            cv2_image = numpy.asarray(self.display_image[:, :])
            self.output_image_pub.publish(
                self.bridge.cv2_to_imgmsg(cv2_image, "bgr8"))
        except CvBridgeError, e:
            logger.error(e)
예제 #28
0
class video_processor:
    """Subscribes to a USB camera stream, segments red/orange and white
    regions, and publishes a Twist heading derived from the mask's
    position-weighted pixel sum (Python 2 / legacy OpenCV `cv` API)."""

    def __init__(self):
        # Wire up the ROS topics and the debug display windows.
        self.sub = rospy.Subscriber('usb_cam/image_raw', Image, self.callback)
        self.pub = rospy.Publisher('heading', Twist)
        self.speed = float(1)  # constant forward speed (m/s)
        self.bridge = CvBridge()
        cv.NamedWindow("Input Video")
        #cv.NamedWindow("Blur Video")
        #cv.NamedWindow("HSV Video")
        #cv.NamedWindow("Hue Video")
        #cv.NamedWindow("Saturation Video")
        #cv.NamedWindow("Value Video")
        cv.NamedWindow("Red-Orange Video")
        cv.NamedWindow("White Video")
        cv.NamedWindow("Red-Orange and White Video")
        #cv.WaitKey(0)

    def callback(self, image_in):
        """Per-frame callback: build the red-orange/white mask and publish
        a Twist whose angular.z is the mask's x*y-weighted pixel sum."""
        try:
            input_image = self.bridge.imgmsg_to_cv(image_in,"bgr8")
        except CvBridgeError, e:
            # NOTE(review): on conversion failure input_image is unbound and
            # the next line raises NameError — confirm intended best-effort.
            print e
        cv.ShowImage("Input Video", input_image)

        # Blur, then convert to HSV and split the channels.
        blur_image = cv.CreateMat(input_image.rows,input_image.cols,cv.CV_8UC3)
        cv.Smooth(input_image,blur_image,cv.CV_BLUR, 10, 10)
        #cv.ShowImage("Blur Video", proc_image)
        proc_image = cv.CreateMat(input_image.rows,input_image.cols,cv.CV_8UC3)
        cv.CvtColor(blur_image, proc_image, cv.CV_BGR2HSV)
        #cv.ShowImage("HSV Video", proc_image)
        split_image = [cv.CreateMat(input_image.rows,input_image.cols,cv.CV_8UC1),cv.CreateMat(input_image.rows,input_image.cols,cv.CV_8UC1),cv.CreateMat(input_image.rows,input_image.cols,cv.CV_8UC1)]
        cv.Split(proc_image, split_image[0],split_image[1],split_image[2], None )
        #hue = cv.CreateMat(input_image.rows,input_image.cols,cv.CV_8UC1)
        #sat = cv.CreateMat(input_image.rows,input_image.cols,cv.CV_8UC1)
        #val = cv.CreateMat(input_image.rows,input_image.cols,cv.CV_8UC1)
        #cv.Split(proc_image, hue,sat,val, None )
        #cv.ShowImage("Hue Video", hue)
        #cv.ShowImage("Saturation Video", sat)
        #cv.ShowImage("Value Video", val)

        # Red-orange mask: saturated pixels whose hue wraps around red
        # (hue > 220 OR hue < 10, the wrap-around red band).
        thresh_0 = cv.CreateMat(input_image.rows,input_image.cols,cv.CV_8UC1)
        thresh_1 = cv.CreateMat(input_image.rows,input_image.cols,cv.CV_8UC1)
        thresh_2 = cv.CreateMat(input_image.rows,input_image.cols,cv.CV_8UC1)
        red_orange = cv.CreateMat(input_image.rows,input_image.cols,cv.CV_8UC1)
        cv.Threshold(split_image[1],thresh_0, 128,255,cv.CV_THRESH_BINARY) # > 50% saturation
        cv.Threshold(split_image[0],thresh_1, 220,255,cv.CV_THRESH_BINARY) # > Purple
        cv.Threshold(split_image[0],thresh_2, 10, 255,cv.CV_THRESH_BINARY_INV) # < Yellow-Orange
        cv.Add(thresh_1,thresh_2,red_orange)
        cv.And(red_orange,thresh_0,red_orange)
        cv.ShowImage("Red-Orange Video",red_orange)

        # White mask from the HLS lightness channel, OR-ed into the result.
        cv.CvtColor(blur_image, proc_image, cv.CV_BGR2HLS)
        cv.Split(proc_image, split_image[0], split_image[1],split_image[2], None )
        cv.Threshold(split_image[1],thresh_0, 204,255,cv.CV_THRESH_BINARY) # > 80% Lum
        cv.ShowImage("White Video",thresh_0)
        cv.Or(red_orange, thresh_0, thresh_0)
        cv.ShowImage("Red-Orange and White Video",thresh_0)
        cv.WaitKey(30)

        # Angular velocity: sum of x*y*pixel over the mask, normalized by
        # the image dimensions (a moment-like steering term).
        ang_z = 0
        x = 0
        for i in range(input_image.rows):
            y = -(input_image.cols / 2)
            row = cv.GetRow(thresh_0,i)
            for j in row.tostring():
                ang_z = ang_z + (x * y *ord(j))
                y = y + 1
            x = x + 1
        ang_z = (ang_z * pi * 2 * 2 * 4 / 255 / input_image.rows / input_image.rows / input_image.cols / input_image.cols)
        p = Twist()
        p.linear.x = self.speed
        p.angular.z = ang_z
        self.pub.publish(p)