Example #1
    def rotate(self, angle):
        """
        Rotate the blob by the given angle in degrees. Most of the blob
        elements will be rotated; note, however, that this will "break"
        drawing back to the original image. To draw the rotated blob,
        create a new layer and draw to that layer.

        Parameters:
            angle - Int or Float
            
        """
        #FIXME: This function should return a blob
        theta = 2 * np.pi * (angle / 360.0)
        mode = ""
        point = (self.x, self.y)
        self.mImg = self.mImg.rotate(angle, mode, point)
        #this is a bit of a hack, but it saves a lot of code
        #I left masks as bitmaps grrrr
        tempMask = Image(self.mMask)
        self.mMask = tempMask.rotate(angle, mode, point).getBitmap()

        tempMask = Image(self.mHullMask)
        self.mHullMask = tempMask.rotate(angle, mode, point).getBitmap()

        #self.mMask.rotate(theta,"",(self.x,self.y))
        #self.mHullMask.rotate(theta,"",(self.x,self.y))
        self.mContour = map(
            lambda x: (x[0] * np.cos(theta) - x[1] * np.sin(theta),
                       x[0] * np.sin(theta) + x[1] * np.cos(theta)),
            self.mContour)
        self.mConvexHull = map(
            lambda x: (x[0] * np.cos(theta) - x[1] * np.sin(theta),
                       x[0] * np.sin(theta) + x[1] * np.cos(theta)),
            self.mConvexHull)

        if (self.mHoleContour is not None):
            #rebinding the loop variable would discard the rotated points,
            #so rebuild the list of hole contours instead
            self.mHoleContour = [
                map(lambda x: (x[0] * np.cos(theta) - x[1] * np.sin(theta),
                               x[0] * np.sin(theta) + x[1] * np.cos(theta)), h)
                for h in self.mHoleContour]
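For reference, the contour update above is a plain 2D rotation applied point-wise. A minimal standalone sketch of the same math (rotate_points is an illustrative name, not part of SimpleCV):

    import numpy as np

    def rotate_points(points, angle_degrees):
        # degrees -> radians, same as theta = 2 * np.pi * (angle / 360.0) above
        theta = np.deg2rad(angle_degrees)
        c, s = np.cos(theta), np.sin(theta)
        pts = np.asarray(points, dtype=float)
        # rotate about the origin: (x, y) -> (x*c - y*s, x*s + y*c)
        return np.column_stack((pts[:, 0] * c - pts[:, 1] * s,
                                pts[:, 0] * s + pts[:, 1] * c))

    print(rotate_points([(1, 0)], 90))  # approximately [[0., 1.]]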
Example #2
 def _codebook2Img(self, cb, patchsize, count, patch_arrangement, spacersz):
     """
     cb = the codebook
     patchsize = the patch size (usually 11x11)
     count = total codes
     patch_arrangement = how the patches are arranged in the image (e.g. 128 = (8x16), 256 = (16x16))
     spacersz = the number of pixels between patches
     """
     w = (patchsize[0] * patch_arrangement[0]) + (
         (patch_arrangement[0] + 1) * spacersz)
     h = (patchsize[1] * patch_arrangement[1]) + (
         (patch_arrangement[1] + 1) * spacersz)
     bm = cv.CreateImage((w, h), cv.IPL_DEPTH_8U, 1)
     cv.Zero(bm)
     img = Image(bm)
     count = 0
     for widx in range(patch_arrangement[0]):
         for hidx in range(patch_arrangement[1]):
             x = (widx * patchsize[0]) + ((widx + 1) * spacersz)
             y = (hidx * patchsize[1]) + ((hidx + 1) * spacersz)
             temp = Image(cb[count, :].reshape(patchsize[0], patchsize[1]))
             img.blit(temp, pos=(x, y))
             count = count + 1
     return img
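As a quick sanity check of the size arithmetic above (values here are hypothetical): with an 11x11 patch size, an (8, 16) arrangement and a 1-pixel spacer, every patch is surrounded by a spacer on all sides:

    patchsize = (11, 11)
    patch_arrangement = (8, 16)
    spacersz = 1
    w = patchsize[0] * patch_arrangement[0] + (patch_arrangement[0] + 1) * spacersz
    h = patchsize[1] * patch_arrangement[1] + (patch_arrangement[1] + 1) * spacersz
    print((w, h))  # (97, 193)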
Example #3
    def getImage(self):
        """
        Retrieve an Image-object from the camera.  If you experience problems
        with stale frames from the camera's hardware buffer, increase the flushcache
        number to dequeue multiple frames before retrieval

        We're working on how to solve this problem.
        """
        if (not self.threaded):
            cv.GrabFrame(self.capture)

        frame = cv.RetrieveFrame(self.capture)
        newimg = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 3)
        cv.Copy(frame, newimg)
        return Image(newimg, self)
Example #4
 def getFullHullMaskedImage(self):
     """
      Get the full-size image, masked to the blob's hull.
     """
     retVal = cv.CreateImage((self.image.width, self.image.height),
                             cv.IPL_DEPTH_8U, 3)
     cv.Zero(retVal)
     bmp = self.image.getBitmap()
     mask = self.mHullMask.getBitmap()
     tl = self.topLeftCorner()
     cv.SetImageROI(retVal, (tl[0], tl[1], self.width(), self.height()))
     cv.SetImageROI(bmp, (tl[0], tl[1], self.width(), self.height()))
     cv.Copy(bmp, retVal, mask)
     cv.ResetImageROI(bmp)
     cv.ResetImageROI(retVal)
     return Image(retVal)
Example #5
    def draw(self, color=Color.GREEN, alpha=-1, width=-1, layer=None):
        """
        Draw the blob, in the given color, to the appropriate layer
        
        By default, this draws the entire blob filled in, with holes.  If you
        provide a width, an outline of the exterior and interior contours is drawn.
        
        color = The color to render the blob.
        alpha = The alpha value of the rendered blob.
        width = The width of the drawn blob in pixels; if -1, the polygon is filled.
        layer = if layer is not None, the blob is rendered to the layer instead of the source image.

        Parameters:
            color - Color object or Color tuple
            alpha - Int
            width - Int
            layer - DrawingLayer
        """
        if not layer:
            layer = self.image.dl()

        if width == -1:
            #copy the mask into 3 channels and multiply by the appropriate color
            maskred = cv.CreateImage(cv.GetSize(self.mMask), cv.IPL_DEPTH_8U,
                                     1)
            maskgrn = cv.CreateImage(cv.GetSize(self.mMask), cv.IPL_DEPTH_8U,
                                     1)
            maskblu = cv.CreateImage(cv.GetSize(self.mMask), cv.IPL_DEPTH_8U,
                                     1)

            maskbit = cv.CreateImage(cv.GetSize(self.mMask), cv.IPL_DEPTH_8U,
                                     3)

            cv.ConvertScale(self.mMask, maskred, color[0] / 255.0)
            cv.ConvertScale(self.mMask, maskgrn, color[1] / 255.0)
            cv.ConvertScale(self.mMask, maskblu, color[2] / 255.0)

            cv.Merge(maskblu, maskgrn, maskred, None, maskbit)

            masksurface = Image(maskbit).getPGSurface()
            masksurface.set_colorkey(Color.BLACK)
            if alpha != -1:
                masksurface.set_alpha(alpha)
            layer._mSurface.blit(masksurface, self.points[0])
        else:
            self.drawOutline(color, alpha, width, layer)
            self.drawHoles(color, alpha, width, layer)
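The filled branch above tints a binary mask channel by channel: each output channel is the mask scaled by one color component. A plain numpy sketch of the same idea (sizes and color are illustrative, this is not the SimpleCV implementation):

    import numpy as np

    mask = np.zeros((20, 20), np.uint8)
    mask[5:15, 5:15] = 255                 # a binary blob mask
    color = (0, 255, 0)                    # RGB green
    tinted = np.dstack([(mask * (c / 255.0)).astype(np.uint8) for c in color])
    print(tinted.shape)                    # (20, 20, 3)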
Example #6
    def getImage(self):
        """
        **SUMMARY**

        Return the current frame of the JpegStream being monitored

        """
        if not self.camthread._threadcapturetime:
            now = time.time()
            while not self.camthread._threadcapturetime:
                if time.time() - now > 5:
                    warnings.warn("Timeout fetching JpegStream at " + self.url)
                    return
                time.sleep(0.1)

        self.capturetime = self.camthread._threadcapturetime
        return Image(pil.open(StringIO(self.camthread.currentframe)), self)
Example #7
    def getFullHullMaskedImage(self):
        """
        Get the full-size image, masked to the blob's hull.
        """
        tl = self.topLeftCorner()
        retVal = np.zeros((self.image.height, self.image.width, 3), np.uint8)
        npimgcrop = self.image.getNumpy()[tl[1]:tl[1] + self.height(),
                                          tl[0]:tl[0] + self.width()]
        mask = self.mHullMask.getGrayNumpy()
        retValcrop = retVal[tl[1]:tl[1] + self.height(),
                            tl[0]:tl[0] + self.width()]

        Image._copyNpwithMask(npimgcrop, retValcrop, mask)

        retVal[tl[1]:tl[1] + self.height(),
               tl[0]:tl[0] + self.width()] = retValcrop

        return Image(retVal)
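A plain numpy sketch of the masked-copy step (assuming Image._copyNpwithMask copies source pixels into the destination wherever the mask is nonzero; array sizes below are illustrative):

    import numpy as np

    src = np.random.randint(0, 255, (10, 10, 3)).astype(np.uint8)
    dst = np.zeros_like(src)                # all-black destination
    mask = np.zeros((10, 10), np.uint8)
    mask[2:8, 2:8] = 255                    # keep only this region
    dst[mask > 0] = src[mask > 0]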
Example #8
    def _testPath(self, path, className, dataset, subset, disp, verbose):
        count = 0
        correct = 0
        badFeat = False
        files = []
        for ext in IMAGE_FORMATS:
            files.extend(glob.glob(os.path.join(path, ext)))
        if (subset > 0):
            nfiles = min(subset, len(files))
        else:
            nfiles = len(files)
        for i in range(nfiles):
            infile = files[i]
            if verbose:
                print "Opening file: " + infile
            img = Image(infile)
            featureVector = []
            for extractor in self.mFeatureExtractors:
                feats = extractor.extract(img)
                if (feats is not None):
                    featureVector.extend(feats)
                else:
                    badFeat = True
            if (badFeat):
                del img
                badFeat = False
                continue
            featureVector.extend([className])
            dataset.append(featureVector)
            test = orange.ExampleTable(self.mOrangeDomain, [featureVector])
            c = self.mClassifier(test[0])
            testClass = test[0].getclass()
            if (testClass == c):
                text = "Classified as " + str(c)
                self._WriteText(disp, img, text, Color.GREEN)
                correct = correct + 1
            else:
                text = "Mislassified as " + str(c)
                self._WriteText(disp, img, text, Color.RED)
            count = count + 1
            del img

        return ([dataset, count, correct])
Example #9
    def getImage(self):
        """
        **SUMMARY**
        
        This method returns the Kinect camera image. 

        **RETURNS**
        
        The Kinect's color camera image. 

        **EXAMPLE**

        >>> k = Kinect()
        >>> while True:
        >>>   k.getImage().show()

        """
        video = freenect.sync_get_video()[0]
        self.capturetime = time.time()
        #video = video[:, :, ::-1]  # RGB -> BGR
        return Image(video.transpose([1, 0, 2]), self)
Example #10
    def getImage(self):
        """
        **SUMMARY**

        Get the SimpleCV Image of the filter

        **RETURNS**

        Image of the filter.

        **EXAMPLE**

        >>> notch = DFT.createNotchFilter(dia1=200, cen=(200, 200),
                                          size=(512, 512), type="highpass")
        >>> notch.getImage().show()
        """
        if isinstance(self._image, type(None)):
            if isinstance(self._numpy, type(None)):
                warnings.warn("Filter doesn't contain any image")
                return None
            self._image = Image(self._numpy)
        return self._image
Example #11
    def stackFilters(self, flt1, flt2):
        """
        **SUMMARY**

        Stack three single channel filters of the same size to create
        a 3 channel filter.

        **PARAMETERS**

        * *flt1* - second filter to be stacked
        * *flt2* - third filter to be stacked

        **RETURNS**

        DFT filter

        **EXAMPLE**

        >>> flt1 = DFT.createGaussianFilter(dia=200, size=(380, 240))
        >>> flt2 = DFT.createGaussianFilter(dia=100, size=(380, 240))
        >>> flt2 = DFT.createGaussianFilter(dia=70, size=(380, 240))
        >>> flt = flt1.stackFilters(flt2, flt3) # 3 channel filter
        """
        if not (self.channels == 1 and flt1.channels == 1
                and flt2.channels == 1):
            warnings.warn("Filters must have only 1 channel")
            return None
        if not (self.size() == flt1.size() and self.size() == flt2.size()):
            warnings.warn("All the filters must be of same size")
            return None
        numpyflt = self._numpy
        numpyflt1 = flt1._numpy
        numpyflt2 = flt2._numpy
        flt = np.dstack((numpyflt, numpyflt1, numpyflt2))
        img = Image(flt)
        stackedfilter = DFT(size=self.size(),
                            numpyarray=flt,
                            image=img,
                            channels=3)
        return stackedfilter
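np.dstack, used above, joins equal-sized single-channel arrays along a third axis; that is all the "stacking" amounts to. A quick illustration:

    import numpy as np

    a = np.zeros((4, 4))
    b = np.ones((4, 4))
    c = np.full((4, 4), 2.0)
    print(np.dstack((a, b, c)).shape)  # (4, 4, 3) -- one channel per input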
Example #12
    def applyFilter(self, image, grayscale=False):
        """
        **SUMMARY**

        Apply the DFT filter to given image.

        **PARAMETERS**

        * *image*     - SimpleCV.Image image
        * *grayscale* - if this value is True we perform the operation on the
                        DFT of the gray version of the image and the result is
                        a gray image. If grayscale is False we perform the
                        operation on each channel and then recombine them to
                        create the result.

        **RETURNS**

        Filtered Image.

        **EXAMPLE**

        >>> notch = DFT.createNotchFilter(dia1=200, cen=(200, 200),
                                          size=(512, 512), type="highpass")
        >>> img = Image('lenna')
        >>> notch.applyFilter(img).show()
        """

        if self.width == 0 or self.height == 0:
            warnings.warn("Empty Filter. Returning the image.")
            return image
        w, h = image.size()
        if grayscale:
            image = image.toGray()
            print self._numpy.dtype, "gray"
        fltImg = Image(self._numpy)
        if fltImg.size() != image.size():
            fltImg = fltImg.resize(w, h)
        filteredImage = image.applyDFTFilter(fltImg, grayscale)
        return filteredImage
Example #13
    def undistort(self, image_or_2darray):
        """
        If given an image, apply the undistortion given by the camera's matrix and return the result.
        
        If given a 1xN 2D cvmat or a 2xN numpy array, it will un-distort points of
        measurement and return them in the original coordinate system.
        
        """
        if (type(self._calibMat) != cv.cvmat
                or type(self._distCoeff) != cv.cvmat):
            warnings.warn(
                "FrameSource.undistort: This operation requires calibration, please load the calibration matrix"
            )
            return None

        if (type(image_or_2darray) == InstanceType
                and image_or_2darray.__class__ == Image):
            inImg = image_or_2darray  # we have an image
            retVal = inImg.getEmpty()
            cv.Undistort2(inImg.getBitmap(), retVal, self._calibMat,
                          self._distCoeff)
            return Image(retVal)
        else:
            mat = ''
            if (type(image_or_2darray) == cv.cvmat):
                mat = image_or_2darray
            else:
                arr = cv.fromarray(np.array(image_or_2darray))
                mat = cv.CreateMat(cv.GetSize(arr)[1], 1, cv.CV_64FC2)
                cv.Merge(arr[:, 0], arr[:, 1], None, None, mat)

            upoints = cv.CreateMat(cv.GetSize(mat)[1], 1, cv.CV_64FC2)
            cv.UndistortPoints(mat, upoints, self._calibMat, self._distCoeff)

            #undistorted.x = (x* focalX + principalX);
            #undistorted.y = (y* focalY + principalY);
            return (np.array(upoints[:, 0]) *\
                [self.getCameraMatrix()[0, 0], self.getCameraMatrix()[1, 1]] +\
                [self.getCameraMatrix()[0, 2], self.getCameraMatrix()[1, 2]])[:, 0]
Example #14
    def invert(self):
        """
        **SUMMARY**

        Invert the filter. All values will be subtracted from 255.

        **RETURNS**

        Inverted Filter

        **EXAMPLE**

        >>> flt = DFT.createGaussianFilter()
        >>> invertflt = flt.invert()
        """

        flt = self._numpy
        flt = 255 - flt
        img = Image(flt)
        invertedfilter = DFT(numpyarray=flt, image=img,
                             size=self.size(), type=self._type)
        invertedfilter._updateParams(self)
        return invertedfilter
Example #15
    def mMask(self):
        # TODO: FIX THIS SO THAT THE INTERIOR CONTOURS GET SHIFTED AND DRAWN

        #Alas - OpenCV does not provide an offset in the fillpoly method for
        #the cv bindings (only cv2 -- which I am trying to avoid). Have to
        #manually do the offset for the ROI shift.
        retVal = np.zeros((self.height(), self.width()), np.uint8)
        l, t = self.topLeftCorner()

        # construct the exterior contour - these are tuples

        cv2.fillPoly(
            retVal,
            np.array([[(p[0][0] - l, p[0][1] - t) for p in self.mContour]]),
            (255, 255, 255), 8)

        #construct the hole contours

        if self.mHoleContour is not None:
            holes = [h - (l, t) for h in self.mHoleContour]
            cv2.fillPoly(retVal, np.array(holes), (0, 0, 0), 8)

        return Image(retVal)
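Since the old cv fillPoly bindings take no offset parameter, the method above shifts every contour point by the blob's top-left corner itself. A minimal cv2 sketch of that offset trick (coordinates are illustrative):

    import numpy as np
    import cv2

    contour = np.array([(60, 55), (90, 55), (90, 95), (60, 95)])  # image coords
    l, t = 60, 55                                 # blob's top-left corner
    mask = np.zeros((41, 31), np.uint8)           # blob-sized mask, (height, width)
    cv2.fillPoly(mask, [(contour - (l, t)).astype(np.int32)], 255)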
Example #16
 def getImage(self):
     """
     Return the current frame of the JpegStream being monitored
     """
     return Image(pil.open(StringIO(self.camthread.currentframe)), self)
Example #17
    def createNotchFilter(self,
                          dia1,
                          dia2=None,
                          cen=None,
                          size=(64, 64),
                          type="lowpass"):
        """
        **SUMMARY**

        Creates a disk shaped notch filter of given diameter at given center.

        **PARAMETERS**

        * *dia1*       -  int - diameter of the disk shaped notch
                       - list - provide a list of three diameters to create
                               a 3 channel filter
        * *dia2*       -  int - outer diameter of the disk shaped notch
                                used for bandpass filter
                       - list - provide a list of three diameters to create
                               a 3 channel filter
        * *cen*        - tuple (x, y) center of the disk shaped notch
                         if not provided, it will be at the center of the 
                         filter
        * *size*       - size of the filter (width, height)
        * *type*:      - lowpass or highpass filter

        **RETURNS**
        DFT notch filter

        **EXAMPLE**

        >>> notch = DFT.createNotchFilter(dia1=200, cen=(200, 200),
                                          size=(512, 512), type="highpass")
        >>> notch = DFT.createNotchFilter(dia1=200, dia2=300, cen=(200, 200),
                                          size=(512, 512))
        >>> img = Image('lenna')
        >>> notch.applyFilter(img).show()
        """
        if isinstance(dia1, list):
            if len(dia1) != 3 and len(dia1) != 1:
                warnings.warn("diameter list must be of size 1 or 3")
                return None

            if isinstance(dia2, list):
                if len(dia2) != 3 and len(dia2) != 1:
                    warnings.warn("diameter list must be of size 3 or 1")
                    return None
                if len(dia2) == 1:
                    dia2 = [dia2[0]] * len(dia1)
            else:
                dia2 = [dia2] * len(dia1)

            if isinstance(cen, list):
                if len(cen) != 3 and len(cen) != 1:
                    warnings.warn("center list must be of size 3 or 1")
                    return None
                if len(cen) == 1:
                    cen = [cen[0]] * len(dia1)
            else:
                cen = [cen] * len(dia1)

            stackedfilter = DFT()
            for d1, d2, c in zip(dia1, dia2, cen):
                stackedfilter = stackedfilter._stackFilters(
                    self.createNotchFilter(d1, d2, c, size, type))
            image = Image(stackedfilter._numpy)
            retVal = DFT(numpyarray=stackedfilter._numpy,
                         image=image,
                         dia=dia1 + dia2,
                         channels=len(dia1),
                         size=size,
                         type=stackedfilter._type,
                         frequency=stackedfilter._freqpass)
            return retVal

        w, h = size
        if cen is None:
            cen = (w / 2, h / 2)
        a, b = cen
        y, x = np.ogrid[-a:w - a, -b:h - b]
        r = dia1 / 2
        mask = x * x + y * y <= r * r
        flt = np.ones((w, h))
        flt[mask] = 255
        if type == "highpass":
            flt = 255 - flt
        if dia2 is not None:
            a, b = cen
            y, x = np.ogrid[-a:w - a, -b:h - b]
            r = dia2 / 2
            mask = x * x + y * y <= r * r
            flt1 = np.ones((w, h))
            flt1[mask] = 255
            flt1 = 255 - flt1
            flt = flt + flt1
            flt = np.clip(flt, 0, 255)
            type = "bandpass"
        img = Image(flt)
        notchfilter = DFT(size=size,
                          numpyarray=flt,
                          image=img,
                          dia=dia1,
                          type="Notch",
                          frequency=type)
        return notchfilter
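The np.ogrid construction above builds a boolean disk mask: every pixel within radius r of the center is marked. A minimal sketch with illustrative sizes:

    import numpy as np

    w, h = 64, 64
    cx, cy = w // 2, h // 2
    r = 10
    y, x = np.ogrid[-cy:h - cy, -cx:w - cx]  # coordinates relative to the center
    flt = np.ones((h, w))
    flt[x * x + y * y <= r * r] = 255        # 255 inside the disk, 1 outside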
Example #18
    def createBandpassFilter(self,
                             xCutoffLow,
                             xCutoffHigh,
                             yCutoffLow=None,
                             yCutoffHigh=None,
                             size=(64, 64)):
        """
        **SUMMARY**

        Creates a bandpass filter of given size.

        **PARAMETERS**

        * *xCutoffLow*    - int - horizontal lower cut off frequency
                          - list - provide a list of three cut off frequencies
        * *xCutoffHigh*   - int - horizontal higher cut off frequency
                          - list - provide a list of three cut off frequencies
        * *yCutoffLow*    - int - vertical lower cut off frequency
                          - list - provide a list of three cut off frequencies
        * *yCutoffHigh*   - int - vertical higher cut off frequency
                          - list - provide a list of three cut off frequencies
                                   to create a 3 channel filter
        * *size*      - size of the filter (width, height)

        **RETURNS**

        DFT filter.

        **EXAMPLE**

        >>> flt = DFT.createBandpassFilter(xCutoffLow=75,
                                           xCutoffHigh=190, size=(320, 280))

        >>> flt = DFT.createBandpassFilter(xCutoffLow=[75],
                                           xCutoffHigh=[190], size=(320, 280))

        >>> flt = DFT.createBandpassFilter(xCutoffLow=[75, 120, 132],
                                           xCutoffHigh=[190, 210, 234],
                                           size=(320, 280))

        >>> flt = DFT.createBandpassFilter(xCutoffLow=75, xCutoffHigh=190,
                                           yCutoffLow=60, yCutoffHigh=210,
                                           size=(320, 280))

        >>> flt = DFT.createBandpassFilter(xCutoffLow=[75], xCutoffHigh=[190],
                                           yCutoffLow=[60], yCutoffHigh=[210],
                                           size=(320, 280))

        >>> flt = DFT.createBandpassFilter(xCutoffLow=[75, 120, 132],
                                           xCutoffHigh=[190, 210, 234], 
                                           yCutoffLow=[70, 110, 112], 
                                           yCutoffHigh=[180, 220, 220], 
                                           size=(320, 280))

        >>> img = Image('lenna')
        >>> flt.applyFilter(img).show()
        """
        lowpass = self.createLowpassFilter(xCutoffLow, yCutoffLow, size)
        highpass = self.createHighpassFilter(xCutoffHigh, yCutoffHigh, size)
        lowpassnumpy = lowpass._numpy
        highpassnumpy = highpass._numpy
        bandpassnumpy = lowpassnumpy + highpassnumpy
        bandpassnumpy = np.clip(bandpassnumpy, 0, 255)
        img = Image(bandpassnumpy)
        bandpassFilter = DFT(size=size,
                             image=img,
                             numpyarray=bandpassnumpy,
                             type="bandpass",
                             xCutoffLow=xCutoffLow,
                             yCutoffLow=yCutoffLow,
                             xCutoffHigh=xCutoffHigh,
                             yCutoffHigh=yCutoffHigh,
                             frequency="bandpass",
                             channels=lowpass.channels)
        return bandpassFilter
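As the code above shows, the bandpass response is simply the clipped sum of a lowpass and a highpass response. A 1-D slice with illustrative values:

    import numpy as np

    low = np.array([255., 200., 40., 0., 0.])   # passes low frequencies
    high = np.array([0., 0., 60., 220., 255.])  # passes high frequencies
    band = np.clip(low + high, 0, 255)          # combined response, kept in 0-255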
Example #19
    def createHighpassFilter(self, xCutoff, yCutoff=None, size=(64, 64)):
        """
        **SUMMARY**

        Creates a highpass filter of given size.

        **PARAMETERS**

        * *xCutoff*       - int - horizontal cut off frequency
                          - list - provide a list of three cut off frequencies
                                   to create a 3 channel filter
        * *yCutoff*       - int - vertical cut off frequency
                          - list - provide a list of three cut off frequencies
                                   to create a 3 channel filter
        * *size*      - size of the filter (width, height)

        **RETURNS**

        DFT filter.

        **EXAMPLE**

        >>> flt = DFT.createHighpassFilter(xCutoff=75, size=(320, 280))

        >>> flt = DFT.createHighpassFilter(xCutoff=[75], size=(320, 280))

        >>> flt = DFT.createHighpassFilter(xCutoff=[75, 100, 120],
                                           size=(320, 280))

        >>> flt = DFT.createHighpassFilter(xCutoff=75, yCutoff=35, 
                                           size=(320, 280))

        >>> flt = DFT.createHighpassFilter(xCutoff=[75], yCutoff=[35],
                                           size=(320, 280))

        >>> flt = DFT.createHighpassFilter(xCutoff=[75, 100, 125], yCutoff=35,
                                           size=(320, 280))
        >>> # yCutoff will be [35, 35, 35]

        >>> flt = DFT.createHighpassFilter(xCutoff=[75, 113, 124],
                                           yCutoff=[35, 45, 90],
                                           size=(320, 280))

        >>> img = Image('lenna')
        >>> flt.applyFilter(img).show()
        """
        if isinstance(xCutoff, list):
            if len(xCutoff) != 3 and len(xCutoff) != 1:
                warnings.warn("xCutoff list must be of size 3 or 1")
                return None
            if isinstance(yCutoff, list):
                if len(yCutoff) != 3 and len(yCutoff) != 1:
                    warnings.warn("yCutoff list must be of size 3 or 1")
                    return None
                if len(yCutoff) == 1:
                    yCutoff = [yCutoff[0]] * len(xCutoff)
            else:
                yCutoff = [yCutoff] * len(xCutoff)
            stackedfilter = DFT()
            for xfreq, yfreq in zip(xCutoff, yCutoff):
                stackedfilter = stackedfilter._stackFilters(
                    self.createHighpassFilter(xfreq, yfreq, size))
            image = Image(stackedfilter._numpy)
            retVal = DFT(numpyarray=stackedfilter._numpy,
                         image=image,
                         xCutoffHigh=xCutoff,
                         yCutoffHigh=yCutoff,
                         channels=len(xCutoff),
                         size=size,
                         type=stackedfilter._type,
                         order=self._order,
                         frequency=stackedfilter._freqpass)
            return retVal

        lowpass = self.createLowpassFilter(xCutoff, yCutoff, size)
        w, h = lowpass.size()
        flt = lowpass._numpy
        flt = 255 - flt
        img = Image(flt)
        highpassFilter = DFT(size=size,
                             numpyarray=flt,
                             image=img,
                             type="Highpass",
                             xCutoffHigh=xCutoff,
                             yCutoffHigh=yCutoff,
                             frequency="highpass")
        return highpassFilter
Example #20
    def createButterworthFilter(self,
                                dia=400,
                                size=(64, 64),
                                order=2,
                                highpass=False):
        """
        **SUMMARY**

        Creates a Butterworth filter of given size and order.

        **PARAMETERS**

        * *dia*       - int - diameter of the Butterworth filter
                      - list - provide a list of three diameters to create
                               a 3 channel filter
        * *size*      - size of the filter (width, height)
        * *order*     - order of the filter
        * *highpass*: -  bool 
                         True: highpass filter 
                         False: lowpass filter

        **RETURNS**

        DFT filter.

        **EXAMPLE**

        >>> flt = DFT.createButterworthfilter(100, (512, 512), order=3,
                                             highpass=True)
        >>> flt = DFT.createButterworthfilter([100, 120, 140], (512, 512),
                                             order=3, highpass=False)
        >>> img = Image('lenna')
        >>> flt.applyFilter(img).show()
        """
        if isinstance(dia, list):
            if len(dia) != 3 and len(dia) != 1:
                warnings.warn("diameter list must be of size 1 or 3")
                return None
            stackedfilter = DFT()
            for d in dia:
                stackedfilter = stackedfilter._stackFilters(
                    self.createButterworthFilter(d, size, order, highpass))
            image = Image(stackedfilter._numpy)
            retVal = DFT(numpyarray=stackedfilter._numpy,
                         image=image,
                         dia=dia,
                         channels=len(dia),
                         size=size,
                         type=stackedfilter._type,
                         order=order,
                         frequency=stackedfilter._freqpass)
            return retVal
        freqpass = "lowpass"
        sz_x, sz_y = size
        x0 = sz_x / 2
        y0 = sz_y / 2
        X, Y = np.meshgrid(np.arange(sz_x), np.arange(sz_y))
        D = np.sqrt((X - x0)**2 + (Y - y0)**2)
        flt = 255 / (1.0 + (D / dia)**(order * 2))
        if highpass:
            freqpass = "highpass"
            flt = 255 - flt
        img = Image(flt)
        retVal = DFT(size=size,
                     numpyarray=flt,
                     image=img,
                     dia=dia,
                     type="Butterworth",
                     frequency=freqpass)
        return retVal
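The filter response above is the standard Butterworth transfer function scaled to the 0-255 range. A standalone numpy sketch with illustrative parameters:

    import numpy as np

    sz_x, sz_y = 64, 64
    dia, order = 20, 2
    X, Y = np.meshgrid(np.arange(sz_x), np.arange(sz_y))
    D = np.sqrt((X - sz_x / 2) ** 2 + (Y - sz_y / 2) ** 2)  # distance from center
    lowpass = 255 / (1.0 + (D / dia) ** (2 * order))        # ~255 inside dia, falls off outside
    highpass = 255 - lowpass                                # the highpass branch above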
Example #21
    def generate(self,
                 imgdirs,
                 numcodes=128,
                 sz=(11, 11),
                 imgs_per_dir=50,
                 img_layout=(8, 16),
                 padding=0,
                 verbose=True):
        """
        This method builds the bag of features codebook from a list of directories
        with images in them. Each directory should be broken down by image class.
        
        * imgdirs: the list of directories.
        * sz: the dimensions of each codebook patch.
        * numcodes: the number of different patches in the codebook.
        * img_layout: the shape of the resulting image in terms of patches - this
          must match numcodes, i.e. numcodes == img_layout[0]*img_layout[1]
        * padding: the pixel padding of each patch in the resulting image.
        * imgs_per_dir: use at most this many images from each directory.
        * verbose: print progress output.


        Once the method has completed it will save the results to a local file
        using the file name codebook.png 
        
        
        WARNING:

            THIS METHOD WILL TAKE FOREVER
        """
        if (numcodes != img_layout[0] * img_layout[1]):
            warnings.warn("Numcodes must match the size of image layout.")
            return None

        self.mPadding = padding
        self.mLayout = img_layout
        self.mNumCodes = numcodes
        self.mPatchSize = sz
        rawFeatures = np.zeros(sz[0] * sz[1])  #fakeout numpy so we can use vstack
        for path in imgdirs:
            fcount = 0
            files = []
            for ext in IMAGE_FORMATS:
                files.extend(glob.glob(os.path.join(path, ext)))
            nimgs = min(len(files), imgs_per_dir)
            for i in range(nimgs):
                infile = files[i]
                if verbose:
                    print(path + " " + str(i) + " of " + str(imgs_per_dir))
                    print "Opening file: " + infile
                img = Image(infile)
                newFeat = self._getPatches(img, sz)
                if verbose:
                    print "     Got " + str(len(newFeat)) + " features."
                rawFeatures = np.vstack((rawFeatures, newFeat))
                del img
        rawFeatures = rawFeatures[1:, :]  # pop the fake value we put on the top
        if verbose:
            print "=================================="
            print "Got " + str(len(rawFeatures)) + " features "
            print "Doing K-Means .... this will take a long time"
        self.mCodebook = self._makeCodebook(rawFeatures, self.mNumCodes)
        self.mCodebookImg = self._codebook2Img(self.mCodebook, self.mPatchSize,
                                               self.mNumCodes, self.mLayout,
                                               self.mPadding)
        self.mCodebookImg.save('codebook.png')
Example #22
    def _extractData(self,seq,color,minsize,maxsize):
        """
        Extract the bulk of the data from a given blob. If the blob's area is
        too large or too small the method returns None.
        """
        if( seq is None or not len(seq)):
            return None
        area = cv.ContourArea(seq)
        if( area < minsize or area > maxsize):
            return None

        retVal = Blob()
        retVal.image = color 
        retVal.mArea = area
        
        retVal.mMinRectangle = cv.MinAreaRect2(seq)
        bb = cv.BoundingRect(seq)
        retVal.x = bb[0]+(bb[2]/2)
        retVal.y = bb[1]+(bb[3]/2)
        retVal.mPerimeter = cv.ArcLength(seq)
        if( seq is not None):  #KAS 
            retVal.mContour = list(seq)
            #retVal.points = list(seq) KAS 4/30

        # so this is a bit hacky....
     
        # For blobs that live right on the edge of the image OpenCV reports the position and width
        #   height as being one over for the true position. E.g. if a blob is at (0,0) OpenCV reports 
        #   its position as (1,1). Likewise the width and height for the other corners is reported as
        #   being one less than the width and height. This is a known bug. 

        xx = bb[0]
        yy = bb[1]
        ww = bb[2]
        hh = bb[3]
        retVal.points = [(xx,yy),(xx+ww,yy),(xx+ww,yy+hh),(xx,yy+hh)]
        retVal._updateExtents()
        chull = cv.ConvexHull2(seq,cv.CreateMemStorage(),return_points=1)
        retVal.mConvexHull = list(chull)
        hullMask = self._getHullMask(chull,bb)
        retVal.mHullImg = self._getBlobAsImage(chull,bb,color.getBitmap(),hullMask)
        retVal.mHullMask = Image(hullMask)
        
        del chull
        
        moments = cv.Moments(seq)

        #This is a hack for a python wrapper bug that was missing
        #the constants required from the ctype
        retVal.m00 = area
        try: 
            retVal.m10 = moments.m10
            retVal.m01 = moments.m01
            retVal.m11 = moments.m11
            retVal.m20 = moments.m20
            retVal.m02 = moments.m02
            retVal.m21 = moments.m21
            retVal.m12 = moments.m12
        except:
            retVal.m10 = cv.GetSpatialMoment(moments,1,0)
            retVal.m01 = cv.GetSpatialMoment(moments,0,1)
            retVal.m11 = cv.GetSpatialMoment(moments,1,1)
            retVal.m20 = cv.GetSpatialMoment(moments,2,0)
            retVal.m02 = cv.GetSpatialMoment(moments,0,2)
            retVal.m21 = cv.GetSpatialMoment(moments,2,1)
            retVal.m12 = cv.GetSpatialMoment(moments,1,2)
            
        retVal.mHu = cv.GetHuMoments(moments)
        mask = self._getMask(seq,bb)
        retVal.mMask = Image(mask)

        retVal.mAvgColor = self._getAvg(color.getBitmap(),bb,mask)
        retVal.mAvgColor = retVal.mAvgColor[0:3]
        #retVal.mAvgColor = self._getAvg(color.getBitmap(),retVal.mBoundingBox,mask)
        #retVal.mAvgColor = retVal.mAvgColor[0:3]
        retVal.mImg = self._getBlobAsImage(seq,bb,color.getBitmap(),mask)

        retVal.mHoleContour = self._getHoles(seq)
        retVal.mAspectRatio = retVal.mMinRectangle[1][0]/retVal.mMinRectangle[1][1]

        return retVal
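The spatial moments gathered above are what give the blob centroid, via the standard identity cx = m10/m00, cy = m01/m00 (not SimpleCV-specific). A minimal cv2-based sketch with an illustrative rectangular contour:

    import numpy as np
    import cv2

    contour = np.array([[10, 10], [50, 10], [50, 40], [10, 40]], dtype=np.int32)
    m = cv2.moments(contour)
    cx, cy = m['m10'] / m['m00'], m['m01'] / m['m00']
    print((cx, cy))  # (30.0, 25.0) -- the rectangle's center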
Example #23
 def getHullEdgeImage(self):
     retVal = np.zeros((self.height(), self.width(), 3), np.uint8)
     l, t = self.topLeftCorner()
      #trace the convex hull (not the hole contours) in local coordinates
      translate = [np.array(self.mConvexHull) - (l, t)]
      cv2.polylines(retVal, np.array(translate), 1, (255, 255, 255))
     return Image(retVal)
Example #24
 def getImage(self):
     video = freenect.sync_get_video()[0]
     #video = video[:, :, ::-1]  # RGB -> BGR
     return Image(video.transpose([1,0,2]), self)
Example #25
 def __init__(self):
     self.mColorModel = ColorModel()
     self.mError = False
     self.mCurImg = Image()
     self.mTruthImg = Image()
     self.mBlobMaker = BlobMaker()
Example #26
 def getFullHullEdgeImage(self):
     retVal = cv.CreateImage((self.image.width, self.image.height),
                             cv.IPL_DEPTH_8U, 3)
     cv.Zero(retVal)
     cv.PolyLine(retVal, [self.mConvexHull], 1, (255, 255, 255))
     return Image(retVal)
Example #27
 def getFullHullEdgeImage(self):
      retVal = np.zeros((self.image.height, self.image.width, 3), np.uint8)
     cv2.polylines(retVal, self._mConvexHullnp, 1, (255, 255, 255))
     return Image(retVal)
Example #28
    def draw(self, color=Color.GREEN, width=-1, alpha=-1, layer=None):
        """
        **SUMMARY**

        Draw the blob, in the given color, to the appropriate layer
        
        By default, this draws the entire blob filled in, with holes.  If you
        provide a width, an outline of the exterior and interior contours is drawn.
        
        **PARAMETERS**

        * *color* -The color to render the blob as a color tuple.
        * *alpha* - The alpha value of the rendered blob 0=transparent 255=opaque.
        * *width* - The width of the drawn blob in pixels; if -1, the polygon is filled.
        * *layer* - A source layer, if layer is not None, the blob is rendered to the layer versus the source image. 

        **RETURNS**
        
        This method either works on the original source image, or on the drawing layer provided.
        The method does not modify the object itself.

        **EXAMPLE**

        >>> img = Image("lenna")
        >>> blobs = img.findBlobs()
        >>> blobs[-2].draw(color=Color.PUCE,width=-1,alpha=128)
        >>> img.show()

        """
        if not layer:
            layer = self.image.dl()

        if width == -1:
            #copy the mask into 3 channels and multiply by the appropriate color
            maskred = cv.CreateImage(
                cv.GetSize(self.mMask._getGrayscaleBitmap()), cv.IPL_DEPTH_8U,
                1)
            maskgrn = cv.CreateImage(
                cv.GetSize(self.mMask._getGrayscaleBitmap()), cv.IPL_DEPTH_8U,
                1)
            maskblu = cv.CreateImage(
                cv.GetSize(self.mMask._getGrayscaleBitmap()), cv.IPL_DEPTH_8U,
                1)

            maskbit = cv.CreateImage(
                cv.GetSize(self.mMask._getGrayscaleBitmap()), cv.IPL_DEPTH_8U,
                3)

            cv.ConvertScale(self.mMask._getGrayscaleBitmap(), maskred,
                            color[0] / 255.0)
            cv.ConvertScale(self.mMask._getGrayscaleBitmap(), maskgrn,
                            color[1] / 255.0)
            cv.ConvertScale(self.mMask._getGrayscaleBitmap(), maskblu,
                            color[2] / 255.0)

            cv.Merge(maskblu, maskgrn, maskred, None, maskbit)

            masksurface = Image(maskbit).getPGSurface()
            masksurface.set_colorkey(Color.BLACK)
            if alpha != -1:
                masksurface.set_alpha(alpha)
            layer._mSurface.blit(masksurface, self.topLeftCorner())  #KAT HERE
        else:
            self.drawOutline(color, alpha, width, layer)
            self.drawHoles(color, alpha, width, layer)
Example #29
    def train(self, images=None, labels=None, csvfile=None, delimiter=";"):
        """
        **SUMMARY**

        Train the face recognizer with images and labels.

        **PARAMETERS**

        * *images*    - A list of Images or ImageSet. All the images must be of
                        same size.
        * *labels*    - A list of labels(int) corresponding to the image in
                        images.
                        There must be at least two different labels.
        * *csvfile*   - You can also provide a csv file with image filenames
                        and labels instead of providing labels and images
                        separately.
        * *delimiter* - The delimiter used in csv files.

        **RETURNS**

        Nothing. None.

        **EXAMPLES**

        >>> f = FaceRecognizer()
        >>> imgs1 = ImageSet(path/to/images_of_type1)
        >>> labels1 = [0]*len(imgs1)
        >>> imgs2 = ImageSet(path/to/images_of_type2)
        >>> labels2 = [1]*len(imgs2)
        >>> imgs3 = ImageSet(path/to/images_of_type3)
        >>> labels3 = [2]*len(imgs3)
        >>> imgs = imgs1 + imgs2 + imgs3
        >>> labels = labels1 + labels2 + labels3
        >>> f.train(imgs, labels)
        >>> img = Image("some_image_of_any_of_the_above_type")
        >>> print f.predict(img)

        Save Fisher Training Data
        >>> f.save("trainingdata.xml")

        Load Fisher Training Data and use it directly without training
        >>> f1 = FaceRecognizer()
        >>> f1.load("trainingdata.xml")
        >>> img = Image("some_image_of_any_of_the_above_type")
        >>> print f1.predict(img)

        Use CSV files for training
        >>> f = FaceRecognizer()
        >>> f.train(csvfile="CSV_file_name", delimiter=";")
        >>> img = Image("some_image_of_any_of_the_type_in_csv_file")
        >>> print f.predict(img)
        """
        if not self.supported:
            warnings.warn("Fisher Recognizer is supported by OpenCV >= 2.4.4")
            return None

        if csvfile:
            images = []
            labels = []
            import csv
            try:
                f = open(csvfile, "rb")
            except IOError:
                warnings.warn("No such file found. Training not initiated")
                return None

            self.csvfiles.append(csvfile)
            filereader = csv.reader(f, delimiter=delimiter)
            for row in filereader:
                images.append(Image(row[0]))
                labels.append(row[1])

        if isinstance(labels, type(None)):
            warnings.warn("Labels not provided. Training not inititated.")
            return None

        self.labels_set = list(set(labels))
        i = 0
        for label in self.labels_set:
            self.labels_dict.update({label: i})
            self.labels_dict_rev.update({i: label})
            i += 1

        if len(self.labels_set) < 2:
            warnings.warn("At least two classes/labels are required"
                          "for training. Training not inititated.")
            return None

        if len(images) != len(labels):
            warnings.warn("Mismatch in number of labels and number of"
                          "training images. Training not initiated.")
            return None

        self.imageSize = images[0].size()
        w, h = self.imageSize
        images = [
            img if img.size() == self.imageSize else img.resize(w, h)
            for img in images
        ]

        self.int_labels = [self.labels_dict[key] for key in labels]
        self.train_labels = labels
        labels = np.array(self.int_labels)
        self.train_imgs = images
        cv2imgs = [img.getGrayNumpy() for img in images]

        self.model.train(cv2imgs, labels)
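For the csvfile path above, a hypothetical training CSV (the file name and paths below are made up for illustration) holds one "filename<delimiter>label" pair per line, matching how row[0] and row[1] are consumed:

    /images/subject1/a.png;0
    /images/subject1/b.png;0
    /images/subject2/a.png;1
    /images/subject2/b.png;1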