Example #1
    def undistort_points(self, src):
        """
        :param src: N source pixel points (u,v) as an Nx2 matrix
        :type src: :class:`cvMat`

        Apply the post-calibration undistortion to the source points
        """

        dst = cv.CloneMat(src)
        cv.UndistortPoints(src,
                           dst,
                           self.intrinsics,
                           self.distortion,
                           R=self.R,
                           P=self.P)
        return dst
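A minimal usage sketch for reference, assuming `cam` is a hypothetical calibrated instance of this class; the legacy cv.UndistortPoints expects the points packed as a 2-channel float matrix:

import cv

# four distorted pixel coordinates (illustrative values), one per row
src = cv.CreateMat(4, 1, cv.CV_32FC2)
for i, (u, v) in enumerate([(10., 12.), (100., 80.), (320., 240.), (630., 470.)]):
    src[i, 0] = (u, v)

undistorted = cam.undistort_points(src)  # 'cam' is a hypothetical calibrated camera object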
Example #2
    def downsample_and_detect(self, rgb):
        """
        Downsample the input image to approximately VGA resolution and detect the
        calibration target corners in the full-size image.

        Combines these apparently orthogonal duties as an optimization. Checkerboard
        detection is too expensive on large images, so it's better to do detection on
        the smaller display image and scale the corners back up to the correct size.

        Returns (scrib, corners, downsampled_corners, board, (x_scale, y_scale)).
        """
        # Scale the input image down to ~VGA size
        (width, height) = cv.GetSize(rgb)
        scale = math.sqrt((width * height) / (640. * 480.))
        if scale > 1.0:
            scrib = cv.CreateMat(int(height / scale), int(width / scale),
                                 cv.GetElemType(rgb))
            cv.Resize(rgb, scrib)
        else:
            scrib = cv.CloneMat(rgb)
        # Due to rounding, actual horizontal/vertical scaling may differ slightly
        x_scale = float(width) / scrib.cols
        y_scale = float(height) / scrib.rows

        # Detect checkerboard
        (ok, downsampled_corners, board) = self.get_corners(scrib, refine=True)

        # Scale corners back to full size image
        corners = None
        if ok:
            if scale > 1.0:
                # Refine up-scaled corners in the original full-res image
                # TODO Does this really make a difference in practice?
                corners_unrefined = [(c[0] * x_scale, c[1] * y_scale)
                                     for c in downsampled_corners]
                # TODO It's silly that this conversion is needed, this function should just work
                #      on the one-channel mono image
                mono = cv.CreateMat(rgb.rows, rgb.cols, cv.CV_8UC1)
                cv.CvtColor(rgb, mono, cv.CV_BGR2GRAY)
                radius = int(math.ceil(scale))
                corners = cv.FindCornerSubPix(
                    mono, corners_unrefined, (radius, radius), (-1, -1),
                    (cv.CV_TERMCRIT_EPS + cv.CV_TERMCRIT_ITER, 30, 0.1))
            else:
                corners = downsampled_corners

        return (scrib, corners, downsampled_corners, board, (x_scale, y_scale))
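A hedged usage sketch, assuming `calib` is a hypothetical calibrator object exposing this method and `frame` is an input cvMat:

scrib, corners, down_corners, board, (x_scale, y_scale) = calib.downsample_and_detect(frame)
if corners is not None:
    # draw the detected corners on the display-size image
    for (u, v) in down_corners:
        cv.Circle(scrib, (int(u), int(v)), 3, (0, 255, 0))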
Example #3
    def detect(self, image):
        img_label = cv.CloneMat(image)

        self._AddBorder(img_label)

        uf = []

        label = 1

        for i in xrange(1, image.height):
            for j in xrange(1, image.width):
                current = (i, j)
                if image[current] == 0:
                    continue

                north = (i - 1, j)
                west = (i, j - 1)

                # Do the pixels to the North and West of the current pixel have the same value but not the same label?
                if (image[north] == image[west] and image[west] != 0) and (
                        img_label[north] != img_label[west]):
                    # Merge
                    min_label = min(img_label[north], img_label[west])
                    max_label = max(img_label[north], img_label[west])

                    img_label[current] = min_label

                    self._Union(uf[int(min_label) - 1], uf[int(max_label) - 1])
                # Does the pixel to the left (West) have the same value?
                elif image[west] == image[current]:
                    img_label[current] = img_label[west]
                # Does the pixel to the left (West) have a different value and the one to the North the same value?
                elif (image[west] != 0 and image[west] != image[current]) and (
                        image[north] == image[current]):
                    img_label[current] = img_label[north]
                else:
                    img_label[current] = label
                    node = self._Node(label)
                    self._MakeSet(node)
                    uf.append(node)
                    label = label + 1

        # Relabel every labeled pixel with its representative
        return self._Relabel(img_label, uf)
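The snippet relies on union-find helpers (`_Node`, `_MakeSet`, `_Union`, and a find used during relabeling) that are not shown. A minimal sketch consistent with how they are called, written as hypothetical members of the same class:

    class _Node(object):
        def __init__(self, label):
            self.label = label
            self.parent = self
            self.rank = 0

    def _MakeSet(self, node):
        # a fresh label starts as its own one-element set
        node.parent = node
        node.rank = 0

    def _Find(self, node):
        # path halving: hop toward the root, compressing as we go
        while node.parent is not node:
            node.parent = node.parent.parent
            node = node.parent
        return node

    def _Union(self, a, b):
        # union by rank: attach the shallower tree under the deeper one
        ra, rb = self._Find(a), self._Find(b)
        if ra is rb:
            return
        if ra.rank < rb.rank:
            ra, rb = rb, ra
        rb.parent = ra
        if ra.rank == rb.rank:
            ra.rank += 1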
Example #4
def show_keypoints(data):
    """ paint and show images with keypoints"""
    for i in range(len(data)):
        try:
            if data[i].facedata:                
                for j in range(len(data[i].facedata)):                                          
                    nbr_keypoints = len(data[i].facedata[j].keypoints)
                    print('%d Features found in file %s' %(nbr_keypoints, data[i].filename))
                    tmpImage = cv.CloneMat(data[i].facedata[j].face)
                    for k in range (nbr_keypoints):
                        if parameter.description_method == 'sift':
                            size = int(data[i].facedata[j].keypoints[k].size)
                        elif parameter.description_method == 'surf':
                            size = int(data[i].facedata[j].keypoints[k].size)/2
                        cv.Circle(tmpImage, (int(data[i].facedata[j].keypoints[k].pt[0]), int(data[i].facedata[j].keypoints[k].pt[1])), size, 255)
                    show_images(tmpImage)
        except:
            logging.error('Error showing keypoints - Maybe no Face in File %s (tools.py)' % data[i].filename)
Example #5
def clone(im):
    """
	Clones the image.  The function checks whether we have an IplImage 
	or a cvMat and returns the same type.
	
	**Parameters:**
	 	* im (cvArr) - The source image.
	
	**Returns:**
		The cloned image.
	
	.. seealso::
		:func:`create()`
	"""
    try:
        im.depth
        return cv.CloneImage(im)
    except AttributeError:
        return cv.CloneMat(im)
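A short usage sketch; the duck typing works because only IplImage exposes a `.depth` attribute ('photo.png' is a placeholder path):

img = cv.LoadImage('photo.png')    # IplImage -> goes through cv.CloneImage
mat = cv.LoadImageM('photo.png')   # cvMat    -> goes through cv.CloneMat
img_copy = clone(img)
mat_copy = clone(mat)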
Example #6
    def set_hist(self, frame, selection):
        sub = cv.GetSubRect(frame, selection)
        save = cv.CloneMat(sub)

        cv.ConvertScale(frame, frame, 0.5)
        cv.Copy(save, sub)
        x, y, w, h = selection

        # rectangular piece of frame
        cv.Rectangle(frame, (x, y), (x + w, y + h), (255, 255, 255))

        sel = cv.GetSubRect(self.hue, selection)
        cv.CalcArrHist([sel], self.hist, 0)

        # get the most prevalent color in the histogram
        (_, max_val, _, _) = cv.GetMinMaxHistValue(self.hist)

        if max_val != 0:
            cv.ConvertScale(self.hist.bins, self.hist.bins, 255. / max_val)
            print "Val set to " + str(max_val)
Example #7
    def __init__(self, imagem_fonte):

        self.imagem_fonte = imagem_fonte
        self.imagem_destino = cv.CloneMat(imagem_fonte)
        self.imagemHistograma = cv.CreateImage((512, 100), 8, 1)
        self.histograma = cv.CreateHist([512], cv.CV_HIST_ARRAY, [[0, 256]], 1)

        self.brilho = 0
        self.contraste = 0

        cv.NamedWindow("Foto Tons Cinza", 1)
        cv.NamedWindow("Foto Equalizada", 1)
        cv.NamedWindow("Histograma Equalizado", 1)

        cv.CreateTrackbar("Brilho", "Histograma Equalizado", 100, 200,
                          self.atualiza_brilho)
        cv.CreateTrackbar("Contraste", "Histograma Equalizado", 100, 200,
                          self.atualiza_contraste)

        self.atualiza_brilhocontraste()
Example #8
def doloop():
	global depth, rgb
	# warm up the sensor: grab several frames and keep the last depth frame as the background
	for i in range(1,10):
		(depth,_), (rgb,_) = get_depth(), get_video()
		bg=cv.CloneMat(cv.fromarray(depth.astype(numpy.uint8)))
	scratch = cv.CreateImage((640,480),8,1)
	scratch2 = cv.CreateImage((640,480),8,1)
	cv.SaveImage('bg.png',bg)
	
	while True:
		# Get a fresh frame
		(depth,_), (rgb,_) = get_depth(), get_video()
		depth=cv.fromarray(depth.astype(numpy.uint8))
		cv.AbsDiff(bg,depth,scratch)
		cv.SubS(scratch, cv.Scalar(2), scratch2)  # knock a small constant off the difference to suppress noise
		# cv.ConvertScale(scratch,scratch2,50)
		cv.Add(depth,scratch2,scratch2)
        
		# Simple Downsample
		cv.ShowImage('both',scratch2)
		cv.WaitKey(10)
Example #9
    def __call__(image, offset=None):
        # note: no 'self' here - Gamera plugin __call__ functions are installed as staticmethods
        from gamera.plugins import _string_io
        from gamera.core import Dim, Point, RGBPixel

        if offset is None:
            offset = Point(0, 0)
        if isinstance(image, cv.cvmat):
            imgcv = cv.CloneMat(image)
            cv.CvtColor(image, imgcv, cv.CV_BGR2RGB)
            return _string_io._from_raw_string(offset,
                                               Dim(imgcv.cols, imgcv.rows),
                                               RGB, DENSE, imgcv.tostring())
        elif isinstance(image, cv.iplimage):
            imgcv = cv.CreateImage(cv.GetSize(image), image.depth,
                                   image.channels)
            cv.CvtColor(image, imgcv, cv.CV_BGR2RGB)
            return _string_io._from_raw_string(offset,
                                               Dim(imgcv.width, imgcv.height),
                                               RGB, DENSE, imgcv.tostring())
        else:
            raise TypeError("Image must be of type cv.cvmat or cv.iplimage")
Example #10
    def detect(self, msg):
        """
        Downsample the input image to approximately VGA resolution and detect the
        calibration target corners in the full-size image.

        Combines these apparently orthogonal duties as an optimization. Checkerboard
        detection is too expensive on large images, so it's better to do detection on
        the smaller display image and scale the corners back up to the correct size.

        Returns (ok, corners, mono, (x_scale, y_scale)).
        """
        mono = self.mk_gray(msg)
        # Scale the input image down to ~VGA size
        (width, height) = cv.GetSize(mono)
        #print "width: " + str(width) + " height: " + str(height)
        scale = math.sqrt( (width*height) / (640.*480.) )
        if scale > 1.0:
            scrib = cv.CreateMat(int(height / scale), int(width / scale), cv.GetElemType(mono))
            cv.Resize(mono, scrib)
        else:
            scrib = cv.CloneMat(mono)
        # Due to rounding, actual horizontal/vertical scaling may differ slightly
        x_scale = float(width) / scrib.cols
        y_scale = float(height) / scrib.rows

        (ok, downsampled_corners) = self.get_corners(scrib, refine = True)

        # Scale corners back to full size image
        if scale > 1.0:
            # Refine up-scaled corners in the original full-res image
            # TODO Does this really make a difference in practice?
            corners_unrefined = [(c[0]*x_scale, c[1]*y_scale) for c in downsampled_corners]
            radius = int(math.ceil(scale))
            corners = cv.FindCornerSubPix(mono, corners_unrefined, (radius,radius), (-1,-1),
                                          ( cv.CV_TERMCRIT_EPS+cv.CV_TERMCRIT_ITER, 30, 0.1 ))
        else:
            corners = downsampled_corners

        return (ok, corners, mono, (x_scale, y_scale))
Example #11
def clone(something):
    if something.__class__ == cv.cvmat:
        return cv.CloneMat(something)
    else:
        return cv.CloneImage(something)
Example #12
    def calibrateFromObservations(self,
                                  observations,
                                  calibextrinsic=True,
                                  pruneoutliers=True,
                                  full_output=True,
                                  fixprincipalpoint=False,
                                  Tcameraestimate=None):
        while True:
            if len(observations) < 3:
                raise self.CalibrationError('too few observations: %d!' %
                                            len(observations),
                                            action=1)
            object_points = self.getObjectPoints()
            all_object_points = []
            all_corners = []
            imagesize = None
            for o in observations:
                all_object_points.append(object_points)
                all_corners.append(o['corners_raw'])
                if imagesize:
                    assert imagesize[0] == o['image_raw'].shape[
                        1] and imagesize[1] == o['image_raw'].shape[0]
                else:
                    imagesize = (o['image_raw'].shape[1],
                                 o['image_raw'].shape[0])
            intrinsiccondition = self.validateCalibrationData(
                all_object_points, all_corners)
            if intrinsiccondition is None:
                raise self.CalibrationError(
                    'bad condition number, %d observations' %
                    (len(observations)),
                    action=1)
            KKorig, kcorig, Traws, error_intrinsic, error_grad = self.calibrateIntrinsicCamera(
                all_object_points,
                all_corners,
                imagesize,
                fixprincipalpoint=fixprincipalpoint,
                computegradients=True)
            cvKKorig = cv.fromarray(KKorig)
            cvkcorig = cv.fromarray(kcorig)
            thresh = median(error_intrinsic) + 0.5
            if any(error_intrinsic > thresh):
                # compute again using a pruned set of observations
                print 'pruning observations (thresh=%f) because intrinsic error is: ' % thresh, error_intrinsic
                observations = [
                    o for i, o in enumerate(observations)
                    if error_intrinsic[i] <= thresh
                ]
            else:
                if mean(error_intrinsic) > 0.6:
                    raise self.CalibrationError(
                        'intrinsic error is huge (%s)! giving up, KK=(%s)' %
                        (str(error_intrinsic), str(KKorig)))
                break
        if not calibextrinsic:
            return KKorig, kcorig, None, {
                'error_intrinsic': error_intrinsic,
                'intrinsiccondition': intrinsiccondition,
                'error_grad': error_grad
            }
        if Tcameraestimate is None:
            raise TypeError(
                'Tcameraestimate needs to be initialized to a 4x4 matrix')
        # unwarp all images and re-detect the checkerboard patterns
        cvKK = cv.CreateMat(3, 3, cv.CV_64F)
        cv.GetOptimalNewCameraMatrix(cvKKorig, cvkcorig, imagesize, 0, cvKK)
        cvUndistortionMapX = cv.CreateMat(imagesize[1], imagesize[0],
                                          cv.CV_32F)
        cvUndistortionMapY = cv.CreateMat(imagesize[1], imagesize[0],
                                          cv.CV_32F)
        cv.InitUndistortRectifyMap(cvKKorig, cvkcorig, cv.fromarray(eye(3)),
                                   cvKK, cvUndistortionMapX,
                                   cvUndistortionMapY)
        KK = array(cvKK)
        KKinv = linalg.inv(KK)
        if full_output:
            print 'KKorig: ', KKorig, '  KK:', KK
        cvimage = None
        cvkczero = cv.fromarray(zeros(kcorig.shape))
        cvrvec = cv.CreateMat(1, 3, cv.CV_64F)
        cvtvec = cv.CreateMat(1, 3, cv.CV_64F)
        all_optimization_data = []
        Tworldpatternestimates = []
        for i, o in enumerate(observations):
            cvimage_dist = cv.fromarray(o['image_raw'])
            if not cvimage:
                cvimage = cv.CloneMat(cvimage_dist)
            cv.Remap(cvimage_dist, cvimage, cvUndistortionMapX,
                     cvUndistortionMapY,
                     cv.CV_INTER_LINEAR + cv.CV_WARP_FILL_OUTLIERS)
            corners = self.detect(cvimage)
            if corners is not None:
                cv.FindExtrinsicCameraParams2(cv.fromarray(object_points),
                                              cv.fromarray(corners), cvKK,
                                              cvkczero, cvrvec, cvtvec)
                T = matrixFromAxisAngle(array(cvrvec)[0])
                T[0:3, 3] = array(cvtvec)[0]
                Tworldpatternestimates.append(
                    dot(dot(o['Tlink'], Tcameraestimate), T))
                all_optimization_data.append(
                    (transformPoints(KKinv, corners), linalg.inv(o['Tlink'])))
            else:
                print 'could not detect original pattern ', i

        # have the following equation: Tlink * Tcamera * Tdetectedpattern * corners3d = Tworldpattern * corners3d
        # need to solve for Tcamera and Tworldpattern
        # instead of using Tdetectedpattern, use projected difference:
        # corners - proj( inv(Tcamera) * inv(Tlink) * Tworldpattern *corners3d)
        corners3d = self.getObjectPoints()
        quatWorldPatternEstimates = array([
            quatFromRotationMatrix(T[0:3, 0:3]) for T in Tworldpatternestimates
        ])
        quatWorldPatternInitial, success = leastsq(
            lambda q: quatArrayTDist(q / sqrt(sum(q**2)),
                                     quatWorldPatternEstimates),
            quatWorldPatternEstimates[0],
            maxfev=100000,
            epsfcn=1e-6)

        rWorldPatternInitial = zeros(6, float64)
        rWorldPatternInitial[0:3] = axisAngleFromQuat(quatWorldPatternInitial)
        rWorldPatternInitial[3:6] = mean(
            array([T[0:3, 3] for T in Tworldpatternestimates]), 0)
        Tcameraestimateinv = linalg.inv(Tcameraestimate)
        rCameraInitial = zeros(6, float64)
        rCameraInitial[0:3] = axisAngleFromRotationMatrix(
            Tcameraestimateinv[0:3, 0:3])
        rCameraInitial[3:6] = Tcameraestimateinv[0:3, 3]

        def errorfn(x, optimization_data):
            Tworldpattern = matrixFromAxisAngle(x[0:3])
            Tworldpattern[0:3, 3] = x[3:6]
            Tcamerainv = matrixFromAxisAngle(x[6:9])
            Tcamerainv[0:3, 3] = x[9:12]
            err = zeros(len(optimization_data) * len(corners3d) * 2)
            off = 0
            for measuredcameracorners, Tlinkinv in optimization_data:
                cameracorners3d = transformPoints(
                    dot(dot(Tcamerainv, Tlinkinv), Tworldpattern), corners3d)
                iz = 1.0 / cameracorners3d[:, 2]
                err[off:(
                    off + len(corners3d)
                )] = measuredcameracorners[:, 0] - cameracorners3d[:, 0] * iz
                off += len(corners3d)
                err[off:(
                    off + len(corners3d)
                )] = measuredcameracorners[:, 1] - cameracorners3d[:, 1] * iz
                off += len(corners3d)
            if full_output:
                print 'rms: ', sqrt(sum(err**2))
            return err

        optimization_data = all_optimization_data
        xorig, cov_x, infodict, mesg, iter = leastsq(
            lambda x: errorfn(x, all_optimization_data),
            r_[rWorldPatternInitial, rCameraInitial],
            maxfev=100000,
            epsfcn=1e-6,
            full_output=1)
        if pruneoutliers:
            # prune the images with the most error
            errors = reshape(
                errorfn(xorig, all_optimization_data)**2,
                (len(all_optimization_data), len(corners3d) * 2))
            errorreprojection = mean(
                sqrt(KK[0, 0]**2 * errors[:, 0:len(corners3d)] +
                     KK[1, 1]**2 * errors[:, len(corners3d):]), 1)
            #thresh=mean(errorreprojection)+std(errorreprojection)
            thresh = median(
                errorreprojection) + 20  #0.5*std(errorreprojection)
            print 'thresh:', thresh, 'errors:', errorreprojection
            optimization_data = [
                all_optimization_data[i]
                for i in flatnonzero(errorreprojection <= thresh)
            ]
            x, cov_x, infodict, mesg, iter = leastsq(
                lambda x: errorfn(x, optimization_data),
                xorig,
                maxfev=100000,
                epsfcn=1e-6,
                full_output=1)
        else:
            x = xorig
        Tcamerainv = matrixFromAxisAngle(x[6:9])
        Tcamerainv[0:3, 3] = x[9:12]
        Tcamera = linalg.inv(Tcamerainv)
        Tworldpattern = matrixFromAxisAngle(x[0:3])
        Tworldpattern[0:3, 3] = x[3:6]
        points3d = self.Compute3DObservationPoints(Tcamerainv,
                                                   optimization_data)

        if full_output:
            errors = reshape(
                errorfn(x, optimization_data)**2,
                (len(optimization_data), len(corners3d) * 2))
            error_reprojection = sqrt(
                KK[0, 0]**2 * errors[:, 0:len(corners3d)] +
                KK[1, 1]**2 * errors[:, len(corners3d):]).flatten()
            print 'final reprojection error (pixels): ', mean(
                error_reprojection), std(error_reprojection)
            error_3d = sqrt(
                sum((transformPoints(Tworldpattern, corners3d) - points3d)**2,
                    1))
            print '3d error: ', mean(error_3d), std(error_3d)
            return KKorig, kcorig, Tcamera, {
                'error_reprojection': error_reprojection,
                'error_3d': error_3d,
                'error_intrinsic': error_intrinsic,
                'intrinsiccondition': intrinsiccondition,
                'error_grad': error_grad,
                'KK': array(cvKK)
            }
        return KKorig, kcorig, Tcamera, None
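The routine assumes several helpers from its surrounding library (`transformPoints`, `matrixFromAxisAngle`, `quatFromRotationMatrix`, and friends). As one illustration, a dimension-generic `transformPoints` consistent with both call sites above (a 3x3 `KKinv` applied to Nx2 pixels, 4x4 transforms applied to Nx3 points) might look like this hypothetical sketch:

from numpy import array, dot

def transformPoints(T, points):
    # apply the upper-left block and last column of an affine/homogeneous
    # transform T to each row of an N x dim point array
    points = array(points)
    dim = points.shape[1]
    return dot(points, T[0:dim, 0:dim].T) + T[0:dim, dim]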
Example #13
    def process(self, cv_image, info, image2=None):
        self.load_model(self.modelpath)
        if self.transform:
            H = cv.Load(self.matrix_location)
            input_image = cv.CloneImage(cv_image)
            cv.WarpPerspective(input_image,cv_image,H,
                    cv.CV_INTER_LINEAR+cv.CV_WARP_INVERSE_MAP+cv.CV_WARP_FILL_OUTLIERS)
        #Use the thresholding module to get the contour out
        shape_contour = thresholding.get_contour(cv_image, bg_mode=self.bg_mode,
                                                 filter_pr2=self.filter_pr2,
                                                 crop_rect=(self.cropx, self.cropy,
                                                            self.cropwidth, self.cropheight),
                                                 cam_info=info, listener=self.listener)

        #Use the shape_fitting module to fit the model to the contour
        
        if self.mode=="tee":
            #fitter = shape_fitting.ShapeFitter(SYMM_OPT=True,ORIENT_OPT=False,FINE_TUNE=False)
            fitter = shape_fitting.ShapeFitter(SYMM_OPT=False,ORIENT_OPT=True,FINE_TUNE=False,num_iters=self.num_iters)
        elif self.mode=="sweater":
            fitter = shape_fitting.ShapeFitter(SYMM_OPT=False,ORIENT_OPT=False,FINE_TUNE=False,num_iters=self.num_iters)
        elif self.mode=="folded":
            fitter = shape_fitting.ShapeFitter(SYMM_OPT=False,ORIENT_OPT=False,FINE_TUNE=False,INITIALIZE=False,num_iters=self.num_iters)
        else:
            fitter = shape_fitting.ShapeFitter(SYMM_OPT=False,ORIENT_OPT=False,FINE_TUNE=False,num_iters=self.num_iters)
        image_anno = cv.CloneImage(cv_image)
        (nearest_pts, final_model, fitted_model) = fitter.fit(self.model,shape_contour,image_anno)
        pts = nearest_pts
        
        params = {}
        
        
        if self.mode == "triangles":
            return_pts = [pts[1],pts[4],pts[2],pts[3]]
            self.highlight_pt(pts[1],cv.CV_RGB(255,0,0),image_anno)
            font = cv.InitFont(cv.CV_FONT_HERSHEY_COMPLEX,1.0,1.0)
            cv.PutText(image_anno,"(l)eft",(pts[1][0]-20,pts[1][1]-15),font,cv.CV_RGB(255,0,0))
            self.highlight_pt(pts[4],cv.CV_RGB(0,0,255),image_anno)
            cv.PutText(image_anno,"(r)ight",(pts[4][0]-20,pts[4][1]-15),font,cv.CV_RGB(0,0,255))
            params = {"tilt":0.0}
        elif self.mode == "towel":
            return_pts = pts
        elif self.mode == "tee" or self.mode == "sweater":
            return_pts = pts[0:5]+pts[8:]
            params = {}
        elif self.mode == "folded":
            return_pts = [final_model.fold_bottom(),final_model.fold_top()]
        else:
            return_pts = pts
            params = {}
        if self.transform:
            H_inv = cv.CloneMat(H)
            cv.Invert(H,H_inv)
            anno_unchanged = cv.CloneImage(image_anno)
            cv.WarpPerspective(anno_unchanged,image_anno,H_inv,
                    cv.CV_INTER_LINEAR+cv.CV_WARP_INVERSE_MAP+cv.CV_WARP_FILL_OUTLIERS)
            new_return_pts = self.transform_pts(return_pts,H_inv)
            for i,pt in enumerate(new_return_pts):
                return_pts[i] = pt
        if self.mode != "triangles":
            for pt in return_pts:
                self.highlight_pt(pt,cv.CV_RGB(255,0,0),image_anno)
        if self.mode != "folded":
            fitted_model.set_image(None)
            pickle.dump(fitted_model,open("%s/last_model.pickle"%self.save_dir,'w'))
        else:
            model_pts = final_model.vertices_full()
            new_model = Models.Point_Model_Contour_Only_Asymm(*model_pts)
            pickle.dump(new_model,open("%s/last_model.pickle"%self.save_dir,'w'))
        score = final_model.score(shape_contour) #fitter.energy_fxn(final_model,shape_contour)
        params["score"] = score
        return (return_pts,params,image_anno)
Example #14
    def threshRed(self):
        THRESH_RED_WIDTH_ROI = 200
        THRESH_RED_HEIGHT_ROI = 90

        if not self.img.has_key('left') or not self.img.has_key('right'):
            return

        leftImg = cv.CloneMat(self.img['left'])
        rightImg = cv.CloneMat(self.img['right'])
        width = cv.GetSize(leftImg)[0]
        height = cv.GetSize(leftImg)[1]

        estimated_gripper_pose, time_since_detection = self.getEstimatedPose()

        gripper_pos = tfx.pose(estimated_gripper_pose).position
        tf_tool_to_cam = tfx.lookupTransform('/left_BC',
                                             gripper_pos.frame,
                                             wait=10)
        gripper_pos = tf_tool_to_cam * gripper_pos
        stereoModel = image_geometry.StereoCameraModel()
        stereoModel.fromCameraInfo(self.info['left'], self.info['right'])
        (u_l, v_l), (u_r, v_r) = stereoModel.project3dToPixel(gripper_pos.list)

        def expandROIRegion(start_ROI,
                            elapsed_time,
                            max_elapsed_time=20.,
                            growth_factor=2.):
            elapsed_time = min(elapsed_time, max_elapsed_time)

            return int(start_ROI + start_ROI *
                       (elapsed_time / max_elapsed_time) * growth_factor)

        #THRESH_RED_WIDTH_ROI = expandROIRegion(THRESH_RED_WIDTH_ROI, time_since_detection)
        #THRESH_RED_HEIGHT_ROI = expandROIRegion(THRESH_RED_HEIGHT_ROI, time_since_detection)

        leftImg_lb_width = max(int(u_l - THRESH_RED_WIDTH_ROI), 0)
        leftImg_ub_width = min(int(u_l + THRESH_RED_WIDTH_ROI), width - 1)
        leftImg_lb_height = max(int(v_l - THRESH_RED_HEIGHT_ROI), 0)
        leftImg_ub_height = min(int(v_l + THRESH_RED_HEIGHT_ROI), height - 1)
        rightImg_lb_width = max(int(u_r - THRESH_RED_WIDTH_ROI), 0)
        rightImg_ub_width = min(int(u_r + THRESH_RED_WIDTH_ROI), width - 1)
        rightImg_lb_height = max(int(v_r - THRESH_RED_HEIGHT_ROI), 0)
        rightImg_ub_height = min(int(v_r + THRESH_RED_HEIGHT_ROI), height - 1)

        if self.print_messages:
            print('(height, width) = ({0}, {1})'.format(height, width))
            print('time_since_detection: {0}'.format(time_since_detection))
            print('leftImg[{0}:{1},{2}:{3}]'.format(leftImg_lb_height,
                                                    leftImg_ub_height,
                                                    leftImg_lb_width,
                                                    leftImg_ub_width))
            print('rightImg[{0}:{1},{2}:{3}]'.format(rightImg_lb_height,
                                                     rightImg_ub_height,
                                                     rightImg_lb_width,
                                                     rightImg_ub_width))

        if (leftImg_lb_width >= leftImg_ub_width
                or leftImg_lb_height >= leftImg_ub_height
                or rightImg_lb_width >= rightImg_ub_width
                or rightImg_lb_height >= rightImg_ub_height):
            return

        leftImg = leftImg[leftImg_lb_height:leftImg_ub_height,
                          leftImg_lb_width:leftImg_ub_width]
        rightImg = rightImg[rightImg_lb_height:rightImg_ub_height,
                            rightImg_lb_width:rightImg_ub_width]
        if self.show_thresh_red_images:
            self.red_roi_pub.publish(self.bridge.cv_to_imgmsg(leftImg))

        leftThresh = cv.CreateImage(cv.GetSize(leftImg), 8, 1)
        rightThresh = cv.CreateImage(cv.GetSize(rightImg), 8, 1)
        leftThresh = threshold(leftImg, leftImg, leftThresh, RED_LOWER,
                               RED_UPPER)
        rightThresh = threshold(rightImg, rightImg, rightThresh, RED_LOWER,
                                RED_UPPER)

        if self.show_thresh_red_images:
            self.red_thresh_roi_pub.publish(
                self.bridge.cv_to_imgmsg(leftThresh))

        leftContours = find_contours(leftThresh, "leftThresh")
        rightContours = find_contours(rightThresh, "rightThresh")
        if len(leftContours) > 0 or len(rightContours) > 0:
            self.lastFoundRed = rospy.Time.now()
            foundRed = True
        elif (self.lastFoundRed is not None and
              rospy.Time.now() - self.lastFoundRed < self.redHistoryDuration):
            foundRed = True
        else:
            foundRed = False

        toolPose = tfx.pose([0, 0, 0],
                            frame=self.tool_frame,
                            stamp=rospy.Time.now()).msg.PoseStamped()
        if foundRed:
            self.red_thresh_marker_pub.publish(
                createMarker(toolPose, color="red"))
        else:
            self.red_thresh_marker_pub.publish(
                createMarker(toolPose, color="blue"))

        self.red_thresh_pub.publish(Bool(foundRed))
Example #15
    def find(self, img):
        started = time.time()
        gray = self.Cached('gray', img.height, img.width, cv.CV_8UC1)
        cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

        sobel = self.Cached('sobel', img.height, img.width, cv.CV_16SC1)
        sobely = self.Cached('sobely', img.height, img.width, cv.CV_16SC1)

        cv.Sobel(gray, sobel, 1, 0)
        cv.Sobel(gray, sobely, 0, 1)
        cv.Add(sobel, sobely, sobel)

        sobel8 = self.Cached('sobel8', sobel.height, sobel.width, cv.CV_8UC1)
        absnorm8(sobel, sobel8)
        cv.Threshold(sobel8, sobel8, 128.0, 255.0, cv.CV_THRESH_BINARY)

        sobel_integral = self.Cached('sobel_integral', img.height + 1,
                                     img.width + 1, cv.CV_32SC1)
        cv.Integral(sobel8, sobel_integral)

        d = 16
        _x1y1 = cv.GetSubRect(
            sobel_integral,
            (0, 0, sobel_integral.cols - d, sobel_integral.rows - d))
        _x1y2 = cv.GetSubRect(
            sobel_integral,
            (0, d, sobel_integral.cols - d, sobel_integral.rows - d))
        _x2y1 = cv.GetSubRect(
            sobel_integral,
            (d, 0, sobel_integral.cols - d, sobel_integral.rows - d))
        _x2y2 = cv.GetSubRect(
            sobel_integral,
            (d, d, sobel_integral.cols - d, sobel_integral.rows - d))

        summation = cv.CloneMat(_x2y2)
        cv.Sub(summation, _x1y2, summation)
        cv.Sub(summation, _x2y1, summation)
        cv.Add(summation, _x1y1, summation)
        sum8 = self.Cached('sum8', summation.height, summation.width,
                           cv.CV_8UC1)
        absnorm8(summation, sum8)
        cv.Threshold(sum8, sum8, 32.0, 255.0, cv.CV_THRESH_BINARY)

        cv.ShowImage("sum8", sum8)
        seq = cv.FindContours(sum8, cv.CreateMemStorage(), cv.CV_RETR_EXTERNAL)
        subimg = cv.GetSubRect(img, (d / 2, d / 2, sum8.cols, sum8.rows))
        t_cull = time.time() - started

        seqs = []
        while seq:
            seqs.append(seq)
            seq = seq.h_next()

        started = time.time()
        found = {}
        print 'seqs', len(seqs)
        for seq in seqs:
            area = cv.ContourArea(seq)
            if area > 1000:
                rect = cv.BoundingRect(seq)
                edge = int((14 / 14.) * math.sqrt(area) / 2 + 0.5)
                candidate = cv.GetSubRect(subimg, rect)
                sym = self.dm.decode(
                    candidate.width,
                    candidate.height,
                    buffer(candidate.tostring()),
                    max_count=1,
                    #min_edge = 6,
                    #max_edge = int(edge)      # Units of 2 pixels
                )
                if sym:
                    onscreen = [(d / 2 + rect[0] + x, d / 2 + rect[1] + y)
                                for (x, y) in self.dm.stats(1)[1]]
                    found[sym] = onscreen
                else:
                    print "FAILED"
        t_brute = time.time() - started
        print "cull took", t_cull, "brute", t_brute
        return found
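The four shifted GetSubRect views evaluate, for every pixel, the sum of the thresholded edge map over a d x d window via the integral-image identity S = I(x2, y2) - I(x1, y2) - I(x2, y1) + I(x1, y1). A numpy sketch of the same idea (illustrative, not this module's code):

import numpy as np

def box_sums(img, d):
    # integral image with a zero row/column prepended, as cv.Integral produces
    I = np.zeros((img.shape[0] + 1, img.shape[1] + 1), dtype=np.int64)
    I[1:, 1:] = img.cumsum(axis=0).cumsum(axis=1)
    # sum over every d x d window, one result per valid top-left corner
    return I[d:, d:] - I[:-d, d:] - I[d:, :-d] + I[:-d, :-d]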
Example #16
              math.pi * deg / 180, r) for deg in range(0, 90, 10)]

for (msg, (x, y), angle, r) in test:
    map = cv.CreateMat(2, 3, cv.CV_32FC1)
    corners = [(x + r * math.cos(angle + th), y + r * math.sin(angle + th))
               for th in [0, math.pi / 2, math.pi, 3 * math.pi / 2]]
    src = mkdmtx(msg)
    (sx, sy) = cv.GetSize(src)
    cv.GetAffineTransform([(0, 0), (sx, 0), (sx, sy)], corners[:3], map)
    temp = cv.CreateMat(bg.rows, bg.cols, cv.CV_8UC3)
    cv.Set(temp, cv.RGB(0, 0, 0))
    cv.WarpAffine(src, temp, map)
    cv.Or(temp, bg, bg)

cv.ShowImage("comp", bg)
scribble = cv.CloneMat(bg)

if 0:
    for i in range(10):
        df.find(bg)

for (sym, coords) in df.find(bg).items():
    print sym
    cv.PolyLine(scribble, [coords],
                1,
                cv.CV_RGB(255, 0, 0),
                1,
                lineType=cv.CV_AA)
    Xs = [x for (x, y) in coords]
    Ys = [y for (x, y) in coords]
    where = ((min(Xs) + max(Xs)) / 2, max(Ys) - 50)
Example #17
class image_converter:
    def __init__(self):
        self.image_pub = rospy.Publisher("image_converter1", Image)

        cv.NamedWindow("Image window", 1)
        self.bridge = CvBridge()
        self.image_sub = rospy.Subscriber("ardrone/image_raw", Image,
                                          self.callback)

    def callback(self, data):
        try:
            cv_image = self.bridge.imgmsg_to_cv(data, "bgr8")
        except CvBridgeError, e:
            print e

        #(cols,rows) = cv.GetSize(cv_image)
        #if cols > 60 and rows > 60 :
        #  cv.Circle(cv_image, (50,50), 10, 255)

        #######################################
        #img1=cv.CreateImage((150,150),8,3)
        #cv.Rectangle(cv_image, (60,60),(70,90), (255,0,255))
        #sub1=cv.GetSubRect(cv_image,(60,60,150,150))
        #save1=cv.CloneMat(sub1)
        #sub2=cv.GetSubRect(img1,(0,0,150,150))
        #cv.Copy(save1,sub2)
        storage = cv.CreateMemStorage(0)
        img = cv.CreateImage(
            (cv.GetSize(cv_image)[1], cv.GetSize(cv_image)[1]), 8, 3)
        img1 = cv.CreateImage(
            (cv.GetSize(cv_image)[1], cv.GetSize(cv_image)[1]), 8, 3)
        #img=cv.CreateImage(cv.GetSize(cv_image),8,3)
        img_r = cv.CreateImage(cv.GetSize(img), 8, 1)
        img_g = cv.CreateImage(cv.GetSize(img), 8, 1)
        img_b = cv.CreateImage(cv.GetSize(img), 8, 1)
        img_g1 = cv.CreateImage(cv.GetSize(img), 8, 1)
        img_g2 = cv.CreateImage(cv.GetSize(img), 8, 1)

        img2 = cv.LoadImage("/home/petin/catkin_ws/src/vp_ardrone2/ris1.jpg",
                            cv.CV_LOAD_IMAGE_GRAYSCALE)

        sub1 = cv.GetSubRect(
            cv_image, (0, 0, cv.GetSize(cv_image)[1], cv.GetSize(cv_image)[1]))
        save1 = cv.CloneMat(sub1)
        sub2 = cv.GetSubRect(
            img, (0, 0, cv.GetSize(cv_image)[1], cv.GetSize(cv_image)[1]))
        cv.Copy(save1, sub2)

        #cv.CvtColor(img, img1, cv.CV_BGR2HSV)
        #cv.CvtPixToPlane(img1,img_h,img_s,img_v,None)
        #cv.CvtColor(img, gray, cv.CV_BGR2GRAY)
        #cv.Smooth(gray,gray,cv.CV_GAUSSIAN,5,5)
        cv.Split(img, img_b, img_g, img_r, None)
        #
        #cv.ShowImage("Image window1", img)
        #cv.ShowImage("Image windowb", img_b)
        #cv.ShowImage("Image windowg", img_g)
        #cv.ShowImage("Image windowr", img_r)
        #
        cv.InRangeS(img_g, cv.Scalar(180), cv.Scalar(255), img_g1)
        #cv.InRangeS(img_s, cv.Scalar(135), cv.Scalar(255), img_s1);
        #cv.InRangeS(img_b, cv.Scalar(0), cv.Scalar(61), img_b1);
        #cv.Invert(img_g1,img_g2,cv.CV_SVD)
        cv.Smooth(img2, img2, cv.CV_GAUSSIAN, 9, 9)
        #
        cv.ShowImage("Image windowh1", img_g1)
        #cv.ShowImage("Image windowg1", img_h1)
        #cv.ShowImage("Image windowr1", img_r1)
        #cv.ShowImage("Image gray", gray)
        # search circle
        storage = cv.CreateMat(img2.width, 1, cv.CV_32FC3)
        cv.ShowImage("Image window1", img2)
        cv.HoughCircles(img2, storage, cv.CV_HOUGH_GRADIENT, 2, 100, 100, 50,
                        10, 400)
        #rospy.loginfo(storage.width)
        for i in xrange(storage.rows):
            (x, y, radius) = storage[i, 0]  # each CV_32FC3 row holds (x, y, radius)
            rospy.loginfo('444')
            cv.Circle(cv_image, (int(x), int(y)), int(radius), (0, 0, 255), 3)
        #search_circle=cv.HoughCircles(img_g1,np.asarray(storage),3,10,150)
        #for res in search_circles:
        #   p = float (cv.GetSeqElem(res))
        #   pt = cv.Point( cv.Round( p[0] ), cv.Round( p[1] ) );
        #   cv.Circle( cv_image, pt, cv.Round( p[2] ), 255 );
        #
        #cv.And(img_g,img_r,img_a)
        #cv.And(img_a,img_b,img_a)
        #
        cv.ShowImage("Image window", cv_image)
        cv.WaitKey(3)

        try:
            self.image_pub.publish(self.bridge.cv_to_imgmsg(cv_image, "bgr8"))
        except CvBridgeError, e:
            print e
Example #18
    def OnIdle(self):
        """Request refresh of the context whenever idle:
        track, get position, update camera, then redraw."""
        hist = cv.CreateHist([180], cv.CV_HIST_ARRAY, [(0,180)], 1 )
        backproject_mode = False
        while True:
            frame = cv.QueryFrame(self.capture)

            # Convert to HSV and keep the hue
            hsv = cv.CreateImage(cv.GetSize(frame), 8, 3)
            cv.CvtColor(frame, hsv, cv.CV_BGR2HSV)
            self.hue = cv.CreateImage(cv.GetSize(frame), 8, 1)
            cv.Split(hsv, self.hue, None, None, None)

            # Compute back projection
            backproject = cv.CreateImage(cv.GetSize(frame), 8, 1)

            # Run the cam-shift
            cv.CalcArrBackProject( [self.hue], backproject, hist )
            if self.track_window and is_rect_nonzero(self.track_window):
                crit = ( cv.CV_TERMCRIT_EPS | cv.CV_TERMCRIT_ITER, 10, 1)
                (iters, (area, value, rect), track_box) = cv.CamShift(backproject, self.track_window, crit)
                self.track_window = rect

            # If mouse is pressed, highlight the current selected rectangle
            # and recompute the histogram

            if self.drag_start and is_rect_nonzero(self.selection):
                sub = cv.GetSubRect(frame, self.selection)
                save = cv.CloneMat(sub)
                cv.ConvertScale(frame, frame, 0.5)
                cv.Copy(save, sub)
                x,y,w,h = self.selection
                cv.Rectangle(frame, (x,y), (x+w,y+h), (0,0,255))

                sel = cv.GetSubRect(self.hue, self.selection )
                cv.CalcArrHist( [sel], hist, 0)
                (_, max_val, _, _) = cv.GetMinMaxHistValue( hist)
                if max_val != 0:
                    cv.ConvertScale(hist.bins, hist.bins, 255. / max_val)
            elif self.track_window and is_rect_nonzero(self.track_window):
                cv.EllipseBox(frame, track_box, cv.CV_RGB(255,0,0), 3, cv.CV_AA, 0 )

                # find centroid coordinate (x, y) and area (z)
                selection_centroid = track_box[0]
                global xposition
                xposition = selection_centroid[0]
                global yposition
                yposition = selection_centroid[1]
                width_height = track_box[1]


                # writes output of coordinates to seed file if needed
                # with open('seed.txt', 'a') as f:
                #     value = (xposition, yposition)
                #     s = str(value) + '\n'
                #     f.write(s)
                #     # f.write('end_of_session')
                # f.close()

                # print outs
                print "x: " + str(xposition)
                print "y: " + str(yposition)
                selection_area = width_height[0]*width_height[1]
                # print "The width is: " + str(width_height[0]) + " The height is: " + str(width_height[1])
                # print "centroid is: " + str(selection_centroid)
                # return "centroid is: " + str(selection_centroid)
                print "area: " + str(selection_area)
                # return "area is: " + str(selection_area)

            if not backproject_mode:
                cv.ShowImage( "CamShiftDemo", frame )
            else:
                cv.ShowImage( "CamShiftDemo", backproject)
            cv.ShowImage( "Histogram", self.hue_histogram_as_image(hist))

            c = cv.WaitKey(10)
            if c == 27: # escape key
                break
            elif c == ord("b"): # show backproject mode with "b" key
                backproject_mode = not backproject_mode

        self.triggerRedraw(1)        
        return 1
Example #19
exT = cv.CreateMat(len(goodcorners), 3, cv.CV_32FC1)

# focal lengths have 1/1 ratio
intrinsics[0, 0] = 1.0
intrinsics[1, 1] = 1.0
cv.CalibrateCamera2(
    opts,
    ipts,
    npts,
    cv.GetSize(images[0]),
    intrinsics,
    distortion,
    exR,
    exT,
    flags=cv.CV_CALIB_ZERO_TANGENT_DIST)
print "D =", list(cvmat_iterator(distortion))
print "K =", list(cvmat_iterator(intrinsics))

print "R =", list(cvmat_iterator(exR))
print "T =", list(cvmat_iterator(exT))

mapx = cv.CreateImage((640, 480), cv.IPL_DEPTH_32F, 1)
mapy = cv.CreateImage((640, 480), cv.IPL_DEPTH_32F, 1)

cv.InitUndistortMap(intrinsics, distortion, mapx, mapy)
for img in images:
    r = cv.CloneMat(img)
    cv.Remap(img, r, mapx, mapy)
    cv.ShowImage("snap", r)
    cv.WaitKey()
Example #20
import roslib
roslib.load_manifest('hai_sandbox')
import cv
import sys
import os.path as pt

img_path = sys.argv[1]
print 'loading', img_path
img = cv.LoadImageM(img_path)
dst = cv.CloneMat(img)
dif = cv.CloneMat(img)
cv.Smooth(img, dst, cv.CV_GAUSSIAN, 91)  # heavy Gaussian blur: low-frequency background estimate
cv.Sub(img, dst, dif)  # image minus its blur: a high-pass, background-subtracted result
cv.SaveImage(img_path, dif)  # note: overwrites the input file in place

#orig_path, fname = pt.split(img_path)
#name = pt.splitext(fname)[0]
#pt.join(orig_path, name)
Example #21
class image_converter:

  def __init__(self):
    self.image_pub = rospy.Publisher("image_converter1",Image)

    cv.NamedWindow("Image window", 1)
    self.bridge = CvBridge()
    #self.image_sub = rospy.Subscriber("ardrone/bottom/image_raw",Image,self.callback)
    self.image_sub = rospy.Subscriber("usb_cam1/image_raw",Image,self.callback)

  def callback(self,data):
    try:
      cv_image = self.bridge.imgmsg_to_cv(data, "bgr8")
    except CvBridgeError, e:
      print e

    #(cols,rows) = cv.GetSize(cv_image)
    #if cols > 60 and rows > 60 :
    #  cv.Circle(cv_image, (50,50), 10, 255)

    #######################################
    #img1=cv.CreateImage((150,150),8,3)
    #cv.Rectangle(cv_image, (60,60),(70,90), (255,0,255))
    #sub1=cv.GetSubRect(cv_image,(60,60,150,150))
    #save1=cv.CloneMat(sub1)
    #sub2=cv.GetSubRect(img1,(0,0,150,150))
    #cv.Copy(save1,sub2)
    img=cv.CreateImage((cv.GetSize(cv_image)[0],cv.GetSize(cv_image)[1]),8,3)    
    img1=cv.CreateImage((cv.GetSize(cv_image)[0],cv.GetSize(cv_image)[1]),8,3)    
    #img=cv.CreateImage(cv.GetSize(cv_image),8,3)    
    img_r=cv.CreateImage(cv.GetSize(img),8,1)
    img_g=cv.CreateImage(cv.GetSize(img),8,1)
    img_b=cv.CreateImage(cv.GetSize(img),8,1)
    img_r1=cv.CreateImage(cv.GetSize(img),8,1)
    img_r2=cv.CreateImage(cv.GetSize(img),8,1)

    img2=cv.LoadImage("/home/petin/ros_pkgs/vp_ardrone2/ris1.jpg",cv.CV_LOAD_IMAGE_GRAYSCALE)
    
    
    sub1=cv.GetSubRect(cv_image,(0,0,cv.GetSize(cv_image)[0],cv.GetSize(cv_image)[1]))
    save1=cv.CloneMat(sub1)
    sub2=cv.GetSubRect(img,(0,0,cv.GetSize(cv_image)[0],cv.GetSize(cv_image)[1]))
    cv.Copy(save1,sub2)

    #cv.CvtColor(img, img1, cv.CV_BGR2HSV)
    #cv.CvtPixToPlane(img1,img_h,img_s,img_v,None)
    #cv.CvtColor(img, gray, cv.CV_BGR2GRAY)
    #cv.Smooth(gray,gray,cv.CV_GAUSSIAN,5,5)
    cv.Split(img,img_b,img_g,img_r,None)
    #
    #cv.ShowImage("Image window1", img)
    #cv.ShowImage("Image windowb", img_b)
    #cv.ShowImage("Image windowg", img_g)
    #cv.ShowImage("Image windowr", img_r)
    #
    cv.InRangeS(img_r, cv.Scalar(0), cv.Scalar(120), img_r1)
    #cv.InRangeS(img_s, cv.Scalar(135), cv.Scalar(255), img_s1);
    #cv.InRangeS(img_b, cv.Scalar(0), cv.Scalar(61), img_b1);
    #cv.Invert(img_g1,img_g2,cv.CV_SVD)
    ##cv.Smooth(img_g,img_g,cv.CV_GAUSSIAN,9,9)
    #
    #cv.ShowImage("Image windowg1", img_g1)
    #
    ##storage=cv.CreateMemStorage(0)
    ##contours = cv.FindContours (img_r1, storage, cv.CV_CHAIN_APPROX_NONE)
    ##print len(contours)
    cv.ShowImage("Image windowg1", img_r1)
    #for contour in contours.hrange():
    #  size = abs(cvContourArea(contour))
    #  is_convex = cvCheckContourConvexity(contour)
    #  bbox = cvBoundingRect( contour, 0 )
    #  x, y = bbox.x+bbox.width*0.5, bbox.y+bbox.height*0.5
    #  print (x,y)
    ##print(8888888888888888888888888888888888888888888888888888888888)
    ##for (x,y) in contours:
    ##  print (x,y)
    #cv.ShowImage("Image windowg1", img_h1)
    #cv.ShowImage("Image windowr1", img_r1)
    #cv.ShowImage("Image gray", gray)
    # search circle
    #storage = cv.CreateMat(img2.width, 1, cv.CV_32FC3)
    #cv.ShowImage("Image window1", img2)
    #cv.HoughCircles(img2, storage, cv.CV_HOUGH_GRADIENT, 2, 100, 100, 50, 10, 400)
    #rospy.loginfo(len()np.asarray(storage))
    #for i in xrange(storage.width - 1):
    #for i in xrange(storage.width - 1):
    #  radius = storage[i, 2]
    #  center = (storage[i, 0], storage[i, 1])
    #  rospy.loginfo('444')
    #  cv.Circle(cv_image, center, radius, (0, 0, 255), 3, 10, 200)
    #search_circle=cv.HoughCircles(img_g1,np.asarray(storage),3,10,150)
    #for res in search_circles:
    #   p = float (cv.GetSeqElem(res))
    #   pt = cv.Point( cv.Round( p[0] ), cv.Round( p[1] ) );
    #   cv.Circle( cv_image, pt, cv.Round( p[2] ), 255 );
    #
    #cv.And(img_g,img_r,img_a)
    #cv.And(img_a,img_b,img_a)
    cv.ShowImage("Image window2", img2)
     #
    cv.ShowImage("Image window", cv_image)
    cv.WaitKey(3)

    try:
      self.image_pub.publish(self.bridge.cv_to_imgmsg(cv_image, "bgr8"))
    except CvBridgeError, e:
      print e
Example #22
def thinning(img):
    img = cv.CloneMat(img)
    W, H = cv.GetSize(img)
    # repeat until a full pass marks no more pixels for deletion
    while True:
        # 'marked' collects the (y, x) points each sub-iteration flags for deletion
        marked = []
        # sub-iteration 1
        for y in range(1, H - 1):
            for x in range(1, W - 1):
                if img[y, x] == 0:
                    if connectivity(img, y, x) == 1:
                        neighbors = [
                            img[y - 1, x], img[y - 1, x + 1], img[y, x + 1],
                            img[y + 1, x + 1], img[y + 1, x],
                            img[y + 1, x - 1], img[y, x - 1], img[y - 1, x - 1]
                        ]
                        object_neighbors = 8 - int(sum(neighbors) / 255)
                        if object_neighbors >= 2 and object_neighbors <= 6:
                            if sum([
                                    img[y - 1, x], img[y, x + 1], img[y + 1, x]
                            ]) >= 255:  #at least one background (255)
                                if sum([
                                        img[y, x + 1], img[y + 1, x],
                                        img[y, x - 1]
                                ]) >= 255:  # at least one background (255)
                                    marked.append((y, x))

        if len(marked) == 0:
            return img
        for (y, x) in marked:
            img[y, x] = 255

        # sub-iteration 2
        marked = []
        for y in range(1, H - 1):
            for x in range(1, W - 1):
                if img[y, x] == 0:
                    if connectivity(img, y, x) == 1:
                        neighbors = [
                            img[y - 1, x], img[y - 1, x + 1], img[y, x + 1],
                            img[y + 1, x + 1], img[y + 1, x],
                            img[y + 1, x - 1], img[y, x - 1], img[y - 1, x - 1]
                        ]
                        object_neighbors = 8 - int(sum(neighbors) / 255)
                        if object_neighbors >= 2 and object_neighbors <= 6:
                            if sum([
                                    img[y - 1, x], img[y, x + 1], img[y, x - 1]
                            ]) >= 255:  #at least one background (255)
                                if sum([
                                        img[y - 1, x], img[y + 1, x],
                                        img[y, x - 1]
                                ]) >= 255:  # at least one background (255)
                                    marked.append((y, x))
        if len(marked) == 0:
            return img
        for (y, x) in marked:
            img[y, x] = 255

    return img
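The passes rely on a `connectivity` helper that is not shown. A plausible sketch computes the crossing number around the 8-neighborhood, assuming, as in the loops above, that object pixels are 0 and background pixels are 255:

def connectivity(img, y, x):
    # count background -> object (255 -> 0) transitions while walking the
    # 8 neighbors in order; a value of 1 means deleting the pixel cannot
    # split the local object
    n = [img[y - 1, x], img[y - 1, x + 1], img[y, x + 1], img[y + 1, x + 1],
         img[y + 1, x], img[y + 1, x - 1], img[y, x - 1], img[y - 1, x - 1]]
    return sum(1 for a, b in zip(n, n[1:] + n[:1]) if a == 255 and b == 0)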