Example #1
    def extractFromBinary(self,
                          binaryImg,
                          colorImg,
                          minsize=5,
                          maxsize=-1,
                          appx_level=3):
        """
        This method performs blob extraction given a binary source image that is used
        to get the blob images, and a color source image.
        binaryImg - The binary image with the blobs.
        colorImg  - The color image.
        minsize   - The minimum size of the blobs in pixels.
        maxsize   - The maximum blob size in pixels.
        appx_level - The blob approximation level: an integer giving the maximum distance between the true edge and the approximation edge; lower numbers yield a tighter approximation.
        """
        #If you hit this recursion limit may god have mercy on your soul.
        #If you really are having problems set the value higher, but this means
        # you have over 10,000,000 blobs in your image.
        sys.setrecursionlimit(5000)
        #h_next moves to the next external contour
        #v_next() moves to the next internal contour
        if (maxsize <= 0):
            maxsize = colorImg.width * colorImg.height
        #binaryImg.show()
        retVal = []
        test = binaryImg.meanColor()
        if (test[0] == 0.00 and test[1] == 0.00 and test[2] == 0.00):
            return FeatureSet(retVal)

        # There are a couple of weird corner cases with the opencv
        # connect components libraries - when you try to find contours
        # in an all black image, or an image with a single white pixel
        # that sits on the edge of an image the whole thing explodes
        # this check catches those bugs. -KAS
        # Also I am submitting a bug report to Willow Garage - please bear with us.
        ptest = (4 * 255.0) / (binaryImg.width * binaryImg.height)  # value if two pixels are white
        if (test[0] <= ptest and test[1] <= ptest and test[2] <= ptest):
            return retVal
        contourImage = binaryImg.toGray().getGrayNumpy()
        #print contourImage.shape, contourImage.dtype
        contours, hierarchy = cv2.findContours(contourImage, cv2.RETR_CCOMP,
                                               cv2.CHAIN_APPROX_SIMPLE)
        self.contours = copy(contours)
        self.hierarchy = copy(hierarchy)
        if not contours:
            warnings.warn("Unable to find Blobs. Retuning Empty FeatureSet.")
            return FeatureSet([])
        try:
            # note to self
            # http://code.activestate.com/recipes/474088-tail-call-optimization-decorator/
            retVal = self._extractFromBinary(contours, hierarchy, colorImg,
                                             minsize, maxsize, appx_level)
        except RuntimeError as e:
            logger.warning(
                "You exceeded the recursion limit. This means you probably have too many blobs in your image. We suggest you do some morphological operations (erode/dilate) to reduce the number of blobs in your image. This function was designed to max out at about 5000 blobs per image."
            )
        return FeatureSet(retVal)
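A minimal usage sketch for the method above (not part of the original source): it assumes SimpleCV is installed, that BlobMaker is importable from SimpleCV.Features.BlobMaker, and uses the 'lenna' test image bundled with SimpleCV.

# Sketch only: assumed import path and bundled test image.
from SimpleCV import Image
from SimpleCV.Features.BlobMaker import BlobMaker

img = Image('lenna')
binary = img.binarize().invert()   # white blobs on a black background
bm = BlobMaker()
blobs = bm.extractFromBinary(binary, img, minsize=20, appx_level=3)
print(len(blobs))                  # number of blobs that passed the size filter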
Example #2
    def getShapeContext(self):
        """
        Return the shape context descriptors as a FeatureSet. Currently
        this is not used for recognition, but we may use it soon.
        """
        # still need to subsample big contours
        descriptors, completeContour = self.getSCDescriptors()
        fs = FeatureSet()
        for i in range(0, len(completeContour)):
            fs.append(ShapeContextDescriptor(self.image, completeContour[i], descriptors[i], self))

        return fs
Example #3
    def getShapeContext(self):
        """
        Return the shape context descriptors as a FeatureSet. Currently
        this is not used for recognition, but we may use it soon.
        """
        # still need to subsample big contours
        descriptors, completeContour = self.getSCDescriptors()
        fs = FeatureSet()
        for i in range(0, len(completeContour)):
            fs.append(
                ShapeContextDescriptor(self.image, completeContour[i],
                                       descriptors[i], self))

        return fs
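A short usage sketch for getShapeContext() (assumed, not from the source): it presumes SimpleCV is installed and that Image.findBlobs() returns Blob features exposing this method.

# Sketch only: assumes SimpleCV is installed and findBlobs() returns Blobs.
from SimpleCV import Image

img = Image('lenna')
blobs = img.findBlobs()
if blobs:
    descriptors = blobs[-1].getShapeContext()   # FeatureSet of ShapeContextDescriptor
    print(len(descriptors))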
Example #4
    def extract(self,
                img,
                threshval=127,
                minsize=10,
                maxsize=0,
                threshblocksize=3,
                threshconstant=5):
        """
        This method performs a threshold operation on the input image and then
        extracts and returns the blobs.
        img       - The input image (color or b&w)
        threshval - The threshold value for the binarize operation. If threshval = -1, adaptive thresholding is used.
        minsize   - The minimum blob size in pixels.
        maxsize   - The maximum blob size in pixels. 0 = use the default value.
        threshblocksize - The adaptive threshold block size.
        threshconstant  - The constant subtracted from the adaptive threshold.
        """
        if (maxsize <= 0):
            maxsize = img.width * img.height

        #create a single channel image, thresholded to parameters

        blobs = self.extractFromBinary(
            img.binarize(threshval, 255, threshblocksize,
                         threshconstant).invert(), img, minsize, maxsize)
        retVal = sorted(blobs, key=lambda x: x.mArea, reverse=True)
        return FeatureSet(retVal)
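A usage sketch for extract() (assumptions: the SimpleCV.Features.BlobMaker import path and the 'lenna' test image); per the docstring, threshval=-1 selects adaptive thresholding.

# Sketch only: assumed import path; threshval=-1 requests adaptive thresholding.
from SimpleCV import Image
from SimpleCV.Features.BlobMaker import BlobMaker

img = Image('lenna')
bm = BlobMaker()
blobs = bm.extract(img, threshval=-1, minsize=50, threshblocksize=11, threshconstant=5)
for b in blobs[:3]:
    print(b.mArea)   # results come back sorted largest-first by mArea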
Example #5
 def process(self, img):
     """
     Process the image. Return a featureset with a single
     PlayingCard feature or None
     """
     # Can we find anything that looks like a card
     card = self._findCardEdges(img)
     if (card is None):  # if we don't see it just bail
         warnings.warn("Could not find a card.")
         return None
     try:
         # extract the basic features and get color
         card = self._estimateColor(card)
         # okay, we got a color and some features
         # go ahead and estimate the suit
         card = self._estimateSuit(card)
         # Do we think this is a face card this
         # is an easier test
         isFace, card = self._isFaceCard(card)
         if (isFace):
             # if we are a face card, get the face. This is hard.
             card = self._estimateFaceCard(card)
         else:
             # otherwise get the rank
             # first pass is corners second
             # pass is the card body
             card = self._estimateRank(card)
         # now go back do some sanity checks
         # and cleanup the features so it is not
         # too heavy
         card = self._refineEstimates(card)
     except CardError as ce:
         card = ce.card
         if (card is not None):
             # maybe we got a joker or someone
             # is being a jackass and showing us the
             # back of the card.
             card = self._isNonStandardCard(card)
         warnings.warn(ce.msg)  # we may swallow this later
         # optionally we may want to log these to
         # see where we fail and why or do a parameter
         # adjustment and try again
     except Exception:
         # this means we had an error somewhere
         # else maybe numpy
         print("Generic Error.")
         return None
     return FeatureSet([card])
Example #6
    def extractUsingModel(self, img, colormodel, minsize=10, maxsize=0):
        """
        Extract blobs using a color model
        img        - The input image
        colormodel - The color model to use.
        minsize    - The minimum size of the returned features.
        maxsize    - The maximum size of the returned features. 0 = use the default value.

        Parameters:
            img - Image
            colormodel - ColorModel object
            minsize - Int
            maxsize - Int
        """
        if (maxsize <= 0):
            maxsize = img.width * img.height
        gray = colormodel.threshold(img)
        blobs = self.extractFromBinary(gray, img, minsize=minsize, maxsize=maxsize)
        retVal = sorted(blobs, key=lambda x: x.mArea, reverse=True)
        return FeatureSet(retVal)
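A usage sketch for extractUsingModel() (assumed, not from the source): it presumes SimpleCV's ColorModel, trained here on a single, arbitrary color sample.

# Sketch only: assumes SimpleCV's ColorModel and the import path below.
from SimpleCV import Image, ColorModel
from SimpleCV.Features.BlobMaker import BlobMaker

img = Image('lenna')
cm = ColorModel()
cm.add((200, 60, 60))        # train the model on a reddish sample (arbitrary)
bm = BlobMaker()
blobs = bm.extractUsingModel(img, cm, minsize=20)
print(len(blobs))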
Example #7
    def extractFromBinary(self, binaryImg, colorImg, minsize=5, maxsize=-1):
        """
        This method performs blob extraction given a binary source image that is used
        to get the blob images, and a color source image.
        binaryImg - The binary image with the blobs.
        colorImg  - The color image.
        minsize   - The minimum size of the blobs in pixels.
        maxsize   - The maximum blob size in pixels.
        """
        #If you hit this recursion limit may god have mercy on your soul.
        #If you really are having problems set the value higher, but this means
        # you have over 10,000,000 blobs in your image. 
        sys.setrecursionlimit(5000)
        #h_next moves to the next external contour
        #v_next() moves to the next internal contour
        if (maxsize <= 0):
            maxsize = colorImg.width * colorImg.height

        retVal = []
        test = binaryImg.meanColor()
        if (test[0] == 0.00 and test[1] == 0.00 and test[2] == 0.00):
            return FeatureSet(retVal)

        # There are a couple of weird corner cases with the opencv
        # connect components libraries - when you try to find contours
        # in an all black image, or an image with a single white pixel
        # that sits on the edge of an image the whole thing explodes
        # this check catches those bugs. -KAS
        # Also I am submitting a bug report to Willow Garage - please bear with us.
        ptest = 510.0 / (binaryImg.width * binaryImg.height)  # value if two pixels are white
        if (test[0] < ptest and test[1] < ptest and test[2] < ptest):
            return retVal
        
        seq = cv.FindContours(binaryImg._getGrayscaleBitmap(), self.mMemStorage,
                              cv.CV_RETR_TREE, cv.CV_CHAIN_APPROX_SIMPLE)
        
        try:
            # note to self
            # http://code.activestate.com/recipes/474088-tail-call-optimization-decorator/
            retVal = self._extractFromBinary(seq, False, colorImg, minsize, maxsize)
        except RuntimeError as e:
            warnings.warn("You exceeded the recursion limit. This means you probably have too many blobs in your image. We suggest you do some morphological operations (erode/dilate) to reduce the number of blobs in your image. This function was designed to max out at about 5000 blobs per image.")
Example #8
                              cv.CV_CHAIN_APPROX_SIMPLE)
        try:
            # note to self
            # http://code.activestate.com/recipes/474088-tail-call-optimization-decorator/
            retVal = self._extractFromBinary(seq, False, colorImg, minsize,
                                             maxsize, appx_level)
        except RuntimeError as e:
            logger.warning(
                "You exceeded the recursion limit. This means you probably have too many blobs in your image. We suggest you do some morphological operations (erode/dilate) to reduce the number of blobs in your image. This function was designed to max out at about 5000 blobs per image."
            )
        except Exception as e:
            logger.warning(
                "SimpleCV Find Blobs Failed - This could be an OpenCV python binding issue"
            )
        del seq
        return FeatureSet(retVal)

    def _extractFromBinary(self, seq, isaHole, colorImg, minsize, maxsize,
                           appx_level):
        """
        The recursive entry point for the blob extraction. The blobs and holes are presented
        as a tree and we traverse up and across the tree. 
        """
        retVal = []

        if (seq is None):
            return retVal

        nextLayerDown = []
        while True:
            if (
Example #9
class BlobMaker:
    """
    Blob maker encapsulates all of the contour extraction process and data, so
    it can be used inside the image class, or extended and used outside the image
    class. The general idea is that the blob maker provides the utilities that one
    would use for blob extraction. Later implementations may include tracking and
    other features.
    """
    contours = None
    hierarchy = None

    def __init__(self):
        pass

    def extractUsingModel(self, img, colormodel, minsize=10, maxsize=0):
        """
        Extract blobs using a color model
        img        - The input image
        colormodel - The color model to use.
        minsize    - The minimum size of the returned features.
        maxsize    - The maximum size of the returned features. 0 = use the default value.

        Parameters:
            img - Image
            colormodel - ColorModel object
            minsize - Int
            maxsize - Int
        """
        if (maxsize <= 0):
            maxsize = img.width * img.height
        gray = colormodel.threshold(img)
        blobs = self.extractFromBinary(gray,
                                       img,
                                       minsize=minsize,
                                       maxsize=maxsize)
        retVal = sorted(blobs, key=lambda x: x.mArea, reverse=True)
        return FeatureSet(retVal)

    def extract(self,
                img,
                threshval=127,
                minsize=10,
                maxsize=0,
                threshblocksize=3,
                threshconstant=5):
        """
        This method performs a threshold operation on the input image and then
        extracts and returns the blobs.
        img       - The input image (color or b&w)
        threshval - The threshold value for the binarize operation. If threshval = -1, adaptive thresholding is used.
        minsize   - The minimum blob size in pixels.
        maxsize   - The maximum blob size in pixels. 0 = use the default value.
        threshblocksize - The adaptive threshold block size.
        threshconstant  - The constant subtracted from the adaptive threshold.
        """
        if (maxsize <= 0):
            maxsize = img.width * img.height

        #create a single channel image, thresholded to parameters

        blobs = self.extractFromBinary(
            img.binarize(threshval, 255, threshblocksize,
                         threshconstant).invert(), img, minsize, maxsize)
        retVal = sorted(blobs, key=lambda x: x.mArea, reverse=True)
        return FeatureSet(retVal)

    def extractFromBinary(self,
                          binaryImg,
                          colorImg,
                          minsize=5,
                          maxsize=-1,
                          appx_level=3):
        """
        This method performs blob extraction given a binary source image that is used
        to get the blob images, and a color source image.
        binaryImg - The binary image with the blobs.
        colorImg  - The color image.
        minsize   - The minimum size of the blobs in pixels.
        maxsize   - The maximum blob size in pixels.
        appx_level - The blob approximation level: an integer giving the maximum distance between the true edge and the approximation edge; lower numbers yield a tighter approximation.
        """
        #If you hit this recursion limit may god have mercy on your soul.
        #If you really are having problems set the value higher, but this means
        # you have over 10,000,000 blobs in your image.
        sys.setrecursionlimit(5000)
        #h_next moves to the next external contour
        #v_next() moves to the next internal contour
        if (maxsize <= 0):
            maxsize = colorImg.width * colorImg.height
        #binaryImg.show()
        retVal = []
        test = binaryImg.meanColor()
        if (test[0] == 0.00 and test[1] == 0.00 and test[2] == 0.00):
            return FeatureSet(retVal)

        # There are a couple of weird corner cases with the opencv
        # connect components libraries - when you try to find contours
        # in an all black image, or an image with a single white pixel
        # that sits on the edge of an image the whole thing explodes
        # this check catches those bugs. -KAS
        # Also I am submitting a bug report to Willow Garage - please bear with us.
        ptest = (4 * 255.0) / (binaryImg.width * binaryImg.height)  # value if two pixels are white
        if (test[0] <= ptest and test[1] <= ptest and test[2] <= ptest):
            return retVal
        contourImage = binaryImg.toGray().getGrayNumpy()
        #print contourImage.shape, contourImage.dtype
        contours, hierarchy = cv2.findContours(contourImage, cv2.RETR_CCOMP,
                                               cv2.CHAIN_APPROX_SIMPLE)
        self.contours = copy(contours)
        self.hierarchy = copy(hierarchy)
        if not contours:
            warnings.warn("Unable to find Blobs. Retuning Empty FeatureSet.")
            return FeatureSet([])
        try:
            # note to self
            # http://code.activestate.com/recipes/474088-tail-call-optimization-decorator/
            retVal = self._extractFromBinary(contours, hierarchy, colorImg,
                                             minsize, maxsize, appx_level)
        except RuntimeError as e:
            logger.warning(
                "You exceeded the recursion limit. This means you probably have too many blobs in your image. We suggest you do some morphological operations (erode/dilate) to reduce the number of blobs in your image. This function was designed to max out at about 5000 blobs per image."
            )
        return FeatureSet(retVal)
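An end-to-end sketch of the class above (same import-path assumption as before); it also peeks at the raw cv2 contours that this version caches on the instance after extraction.

# Sketch only: assumed import path.
from SimpleCV import Image
from SimpleCV.Features.BlobMaker import BlobMaker

img = Image('lenna')
bm = BlobMaker()
blobs = bm.extract(img, threshval=127, minsize=10)
if blobs:
    blobs[0].draw()               # largest blob first, since extract() sorts by mArea
    img.show()
print(len(bm.contours or []))     # raw contours cached by extractFromBinary()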
Example #10
    def getConvexityDefects(self, returnPoints=False):
        """
        **SUMMARY**

        Get Convexity Defects of the contour.

        **PARAMETERS**

        *returnPoints* - Bool(False). 
                         If False: Returns FeatureSet of Line(start point, end point) 
                         and Corner(far point)
                         If True: Returns a list of tuples
                         (start point, end point, far point)
        **RETURNS**

        FeatureSet - A FeatureSet of Line and Corner objects
                     OR
                     A list of (start point, end point, far point)
                     See PARAMETERS.

        **EXAMPLE**

        >>> img = Image('lenna')
        >>> blobs = img.findBlobs()
        >>> blob = blobs[-1]
        >>> lines, farpoints = blob.getConvexityDefects()
        >>> lines.draw()
        >>> farpoints.draw(color=Color.RED, width=-1)
        >>> img.show()

        >>> points = blob.getConvexityDefects(returnPoints=True)
        >>> startpoints = zip(*points)[0]
        >>> endpoints = zip(*points)[1]
        >>> farpoints = zip(*points)[2]
        >>> print startpoints, endpoints, farpoints
        """
        def cvFallback():
            chull = cv.ConvexHull2(self.mContour,
                                   cv.CreateMemStorage(),
                                   return_points=False)
            defects = cv.ConvexityDefects(self.mContour, chull,
                                          cv.CreateMemStorage())
            points = [(defect[0], defect[1], defect[2]) for defect in defects]
            return points

        try:
            import cv2
            if hasattr(cv2, "convexityDefects"):
                #hull = [self.mContour.index(x) for x in self.mConvexHull]
                #hull = np.array(hull).reshape(len(hull), 1)
                #defects = cv2.convexityDefects(np.array(self.mContour), hull)
                hull = cv2.convexHull(self.mContour, returnPoints=False)
                defects = cv2.convexityDefects(self.mContour, hull)
                if defects is None:
                    warnings.warn(
                        "Unable to find defects. Returning Empty FeatureSet.")
                    defects = []
                points = [(tuple(self.mContour[defect[0][0]][0]),
                           tuple(self.mContour[defect[0][1]][0]),
                           tuple(self.mContour[defect[0][2]][0]))
                          for defect in defects]
            else:
                points = cvFallback()
        except ImportError:
            points = cvFallback()

        if returnPoints:
            return FeatureSet(points)
        else:
            lines = FeatureSet(
                [Line(self.image, (start, end)) for start, end, far in points])
            farpoints = FeatureSet([
                Corner(self.image, far[0], far[1])
                for start, end, far in points
            ])
            features = FeatureSet([lines, farpoints])
            return features
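The cv2 branch above unpacks defect[0][0..2] as indices into the contour; the standalone sketch below (independent of SimpleCV) shows that layout of cv2.convexityDefects(): each row is [start_index, end_index, farthest_point_index, fixed_point_depth], with depth / 256.0 approximating the distance from the farthest point to the hull.

# Standalone sketch of the cv2 call used above.
import cv2
import numpy as np

# a simple concave polygon with one notch at (5, 5)
contour = np.array([[[0, 0]], [[10, 0]], [[5, 5]], [[10, 10]], [[0, 10]]], dtype=np.int32)
hull = cv2.convexHull(contour, returnPoints=False)
defects = cv2.convexityDefects(contour, hull)
if defects is not None:
    for d in defects:
        start_i, end_i, far_i, depth = d[0]
        print(tuple(contour[start_i][0]),
              tuple(contour[end_i][0]),
              tuple(contour[far_i][0]),
              depth / 256.0)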