Example #1
File: Lines.py  Project: yangzxstar/nupic
    def process(self, image):
        """
    @param image -- The image to process.

    Returns a single image, or a list containing one or more images.
    """
        BaseFilter.process(self, image)
        s = min(image.size)
        sizeRange = [0, s]

        imageArray = numpy.array(image.split()[0].getdata())
        newImage = Image.new("LA", image.size)
        newImage.putdata([uint(p) for p in imageArray])
        newImage.putalpha(image.split()[1])
        for i in xrange(int(self.difficulty * self.maxLines)):
            # Generate random line
            start = (random.randint(sizeRange[0], sizeRange[1]),
                     random.randint(sizeRange[0], sizeRange[1]))
            end = (random.randint(sizeRange[0], sizeRange[1]),
                   random.randint(sizeRange[0], sizeRange[1]))

            # Generate random color
            color = random.randint(0, 255)

            # Add the line to the image
            draw = ImageDraw.Draw(newImage)
            draw.line((start, end), fill=color)

        return newImage
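The example above draws a random set of lines over the grayscale band of the input, with the line count scaled by the filter's difficulty. A minimal standalone sketch of the same idea, using only Pillow and Python 3 (the parameter values and file names are illustrative, not part of the nupic filter):

import random
from PIL import Image, ImageDraw

def add_random_lines(img, num_lines=10):
    """Return a grayscale copy of img with num_lines random lines drawn on it."""
    out = img.convert("L")
    draw = ImageDraw.Draw(out)
    s = min(out.size)
    for _ in range(num_lines):
        # Each line gets a random endpoint pair and a random gray level.
        start = (random.randint(0, s), random.randint(0, s))
        end = (random.randint(0, s), random.randint(0, s))
        draw.line((start, end), fill=random.randint(0, 255))
    return out

# Hypothetical usage:
# noisy = add_random_lines(Image.open("input.png"), num_lines=20)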
Example #2
    def process(self, image):
        """
    @param image -- The image to process.

    Returns a single image, or a list containing one or more images.
    """

        BaseFilter.process(self, image)

        s = min(image.size)
        sizeRange = [int(0.1 * s), int(0.4 * s)]

        newArray = numpy.array(image.split()[0].getdata())
        newArray.resize(image.size[1], image.size[0])
        for j in xrange(self.numRectangles):
            # Generate random rectangle
            size = (self.random.randint(sizeRange[0], sizeRange[1]),
                    self.random.randint(sizeRange[0], sizeRange[1]))
            loc = [
                self.random.randint(0, image.size[1]),
                self.random.randint(0, image.size[0])
            ]
            # Move the location so that the rectangle is centered on it
            loc[0] -= size[0] / 2
            loc[1] -= size[1] / 2
            # Generate random color
            color = self.random.randint(0, 255)
            # Add the rectangle to the image
            newArray[max(0,loc[0]):min(newArray.shape[0], loc[0]+size[0]), \
              max(0,loc[1]):min(newArray.shape[1],loc[1]+size[1])] = color
        newImage = Image.new("L", image.size)
        newImage.putdata([uint(p) for p in newArray.flatten()])
        newImage.putalpha(image.split()[1])
        return newImage
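This variant occludes the image with solid random rectangles instead of lines. A standalone sketch of the same manipulation with numpy and Pillow (the rectangle count, the 10%-40% size range, and the RNG handling are illustrative assumptions):

import numpy as np
from PIL import Image

def add_random_rectangles(img, num_rectangles=5, rng=None):
    """Paint solid random-gray rectangles, centered at random points, onto a grayscale image."""
    rng = rng or np.random.default_rng()
    arr = np.array(img.convert("L"))               # shape (height, width), dtype uint8
    h, w = arr.shape
    lo, hi = int(0.1 * min(h, w)), int(0.4 * min(h, w))
    for _ in range(num_rectangles):
        rect_h, rect_w = rng.integers(lo, hi + 1, size=2)
        top = int(rng.integers(0, h)) - rect_h // 2
        left = int(rng.integers(0, w)) - rect_w // 2
        color = int(rng.integers(0, 256))
        # Clip the rectangle to the image bounds before filling it.
        arr[max(0, top):min(h, top + rect_h), max(0, left):min(w, left + rect_w)] = color
    return Image.fromarray(arr)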
Example #3
File: Lines.py  Project: nithindd/nupic
    def process(self, image):
        """
    @param image -- The image to process.

    Returns a single image, or a list containing one or more images.
    """
        BaseFilter.process(self, image)
        s = min(image.size)
        sizeRange = [0, s]

        imageArray = numpy.array(image.split()[0].getdata())
        newImage = Image.new("LA", image.size)
        newImage.putdata([uint(p) for p in imageArray])
        newImage.putalpha(image.split()[1])
        for i in xrange(int(self.difficulty * self.maxLines)):
            # Generate random line
            start = (random.randint(sizeRange[0], sizeRange[1]), random.randint(sizeRange[0], sizeRange[1]))
            end = (random.randint(sizeRange[0], sizeRange[1]), random.randint(sizeRange[0], sizeRange[1]))

            # Generate random color
            color = random.randint(0, 255)

            # Add the line to the image
            draw = ImageDraw.Draw(newImage)
            draw.line((start, end), fill=color)

        return newImage
Example #4
    def process(self, image):
        """
    @param image -- The image to process.

    Returns a single image, or a list containing one or more images.
    """
        BaseFilter.process(self, image)
        #Create numpy array from image grayscale data and resize to image dimensions
        imageArray = numpy.array(image.split()[0].getdata())
        imageArray.resize(image.size[1], image.size[0])
        #Calculate offset from difficulty level
        offset = self.difficulty * (self.maxOffset)
        #Add random change to offset within window size
        halfWindowSize = 0.1 * offset
        offset = ((offset - halfWindowSize) +
                  halfWindowSize * self.random.random() * ((-1)**self.random.randint(1, 2)))
        #Apply random direction
        offset *= ((-1)**self.random.randint(1, 2))
        imageArray += offset
        #Recreate PIL image
        newImage = Image.new("L", image.size)
        newImage.putdata([uint(p) for p in imageArray.flatten()])
        newImage.putalpha(image.split()[1])
        return newImage
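Here the filter shifts every pixel by a signed offset derived from difficulty, maxOffset, and a small random window. A standalone sketch of the core operation with numpy and Pillow (the offset range is an illustrative assumption; clipping to [0, 255] is added so the result stays a valid 8-bit image):

import numpy as np
from PIL import Image

def shift_brightness(img, max_offset=40, rng=None):
    """Add a random signed brightness offset to a grayscale image."""
    rng = rng or np.random.default_rng()
    offset = rng.uniform(-max_offset, max_offset)
    arr = np.array(img.convert("L"), dtype=np.float32) + offset
    # Clip back into the 8-bit range before rebuilding the PIL image.
    return Image.fromarray(np.clip(arr, 0, 255).astype(np.uint8))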
Example #5
  def process(self, image):
    """
    @param image -- The image to process.

    Returns a single image, or a list containing one or more images.
    """

    BaseFilter.process(self, image)

    s = min(image.size)
    sizeRange = [int(0.1 * s), int(0.4 * s)]

    newArray = numpy.array(image.split()[0].getdata())
    newArray.resize(image.size[1],image.size[0])
    for j in xrange(self.numRectangles):
      # Generate random rectangle
      size = (self.random.randint(sizeRange[0], sizeRange[1]),
        self.random.randint(sizeRange[0], sizeRange[1]))
      loc = [self.random.randint(0,image.size[1]),
             self.random.randint(0,image.size[0])]
      # Move the location so that the rectangle is centered on it
      loc[0] -= size[0]/2
      loc[1] -= size[1]/2
      # Generate random color
      color = self.random.randint(0,255)
      # Add the rectangle to the image
      newArray[max(0,loc[0]):min(newArray.shape[0], loc[0]+size[0]), \
        max(0,loc[1]):min(newArray.shape[1],loc[1]+size[1])] = color
    newImage = Image.new("L", image.size)
    newImage.putdata([uint(p) for p in newArray.flatten()])
    newImage.putalpha(image.split()[1])
    return newImage
Example #6
    def process(self, image):
        """
    @param image -- The image to process.

    Returns a single image, or a list containing one or more images.
    """
        BaseFilter.process(self, image)
        #Type of gradient?
        type = self.random.choice(self.types)

        gradientImage = self.gradientImages.get((image.size, type))

        if not gradientImage:
            #Gradient image, used as mask
            gradientImage = Image.new("L", image.size)
            gradientArray = numpy.array(gradientImage.split()[0].getdata())
            gradientArray.resize(image.size[1], image.size[0])

            #Calculate gradient
            opacity = (self.difficulty - self.difficulty * .2 +
                       self.random.random() * self.difficulty * .2)
            for i in xrange(image.size[1]):
                for j in xrange(image.size[0]):
                    if type == 'horizontal':
                        gradientArray[i][j] = int(
                            float(j) / image.size[0] * 255 / opacity)
                    elif type == 'vertical':
                        gradientArray[i][j] = int(
                            float(i) / image.size[1] * 255 / opacity)
                    elif type == 'circular':
                        gradientArray[i][j] = int(
                            math.sqrt((i - image.size[1] / 2)**2 +
                                      (j - image.size[0] / 2)**2) /
                            math.sqrt((image.size[1] / 2)**2 +
                                      (image.size[0] / 2)**2) * 255 / opacity)

            gradientImage.putdata([uint(p) for p in gradientArray.flatten()])
            #Add gradient image to dictionary
            self.gradientImages[(image.size, type)] = gradientImage

        #Image to composite with for brightness
        whiteImage = Image.new("LA", image.size)
        whiteArray = numpy.array(whiteImage.split()[0].getdata())
        whiteArray += 255
        whiteImage.putdata([uint(p) for p in whiteArray])
        newImage = Image.composite(image, whiteImage, gradientImage)
        return newImage
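The gradient filter builds a per-size, per-type mask once, caches it in self.gradientImages, and then composites the input against a white image through that mask, so one side of the picture fades toward white. A standalone sketch of the 'horizontal' case with numpy and Pillow (the opacity value is an illustrative assumption):

import numpy as np
from PIL import Image

def fade_to_white(img, opacity=0.8):
    """Composite a grayscale image against white through a left-to-right gradient mask."""
    gray = img.convert("L")
    w, h = gray.size
    # Mask values grow from 0 at the left edge toward 255 at the right edge.
    ramp = np.clip(np.linspace(0, 255 / opacity, w), 0, 255).astype(np.uint8)
    mask = Image.fromarray(np.tile(ramp, (h, 1)))
    white = Image.new("L", gray.size, 255)
    # Image.composite(a, b, mask): mask=255 keeps a, mask=0 picks b,
    # so the left edge becomes white and the right edge keeps the original.
    return Image.composite(gray, white, mask)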
Example #7
File: Gradient.py  Project: AI-Cdrone/nupic
  def process(self, image):
    """
    @param image -- The image to process.

    Returns a single image, or a list containing one or more images.
    """
    BaseFilter.process(self, image)
    #Type of gradient?
    type = self.random.choice(self.types)

    gradientImage = self.gradientImages.get((image.size, type))

    if not gradientImage:
        #Gradient image, used as mask
        gradientImage = Image.new("L", image.size)
        gradientArray = numpy.array(gradientImage.split()[0].getdata())
        gradientArray.resize(image.size[1], image.size[0])

        #Calculate gradient
        opacity = self.difficulty - self.difficulty*.2 + self.random.random()*self.difficulty*.2
        for i in xrange(image.size[1]):
            for j in xrange(image.size[0]):
                if type == 'horizontal':
                    gradientArray[i][j] = int(float(j)/image.size[0]*255/opacity)
                elif type == 'vertical':
                    gradientArray[i][j] = int(float(i)/image.size[1]*255/opacity)
                elif type == 'circular':
                    gradientArray[i][j] = int(math.sqrt((i - image.size[1]/2)**2 + (j - image.size[0]/2)**2)/math.sqrt((image.size[1]/2)**2 + (image.size[0]/2)**2)*255/opacity)

        gradientImage.putdata([uint(p) for p in gradientArray.flatten()])
        #Add gradient image to dictionary
        self.gradientImages[(image.size, type)] = gradientImage

    #Image to composite with for brightness
    whiteImage = Image.new("LA", image.size)
    whiteArray = numpy.array(whiteImage.split()[0].getdata())
    whiteArray += 255
    whiteImage.putdata([uint(p) for p in whiteArray])
    newImage = Image.composite(image, whiteImage, gradientImage)
    return newImage
Example #8
    def process(self, image):
        """
    @param image -- The image to process.

    Returns a single image, or a list containing one or more images.
    """

        BaseFilter.process(self, image)

        if self.mode != 'gray':
            raise RuntimeError(
                "EqualizeHistogram only supports grayscale images.")

        if self.region == 'bbox':
            bbox = image.split()[1].getbbox()
            croppedImage = image.crop(bbox)
            croppedImage.load()
            alpha = croppedImage.split()[1]
            croppedImage = ImageOps.equalize(croppedImage.split()[0])
            croppedImage.putalpha(alpha)
            image.paste(croppedImage, bbox)
        elif self.region == 'mask':
            bbox = image.split()[1].getbbox()
            croppedImage = image.crop(bbox)
            croppedImage.load()
            alpha = croppedImage.split()[1]
            # Fill in the part of the cropped image outside the mask with
            # uniformly-distributed noise
            noiseArray = \
              numpy.random.randint(0, 255, croppedImage.size[0]*croppedImage.size[1])
            noiseImage = Image.new('L', croppedImage.size)
            noiseImage.putdata([uint(p) for p in noiseArray])
            compositeImage = Image.composite(croppedImage, noiseImage, alpha)
            # Equalize the composite image
            compositeImage = ImageOps.equalize(compositeImage.split()[0])
            # Paste the part of the equalized image within the mask back
            # into the cropped image
            croppedImage = Image.composite(compositeImage, croppedImage, alpha)
            croppedImage.putalpha(alpha)
            # Paste the cropped image back into the full image
            image.paste(croppedImage, bbox)
        elif self.region == 'all':
            alpha = image.split()[1]
            image = ImageOps.equalize(image.split()[0])
            image.putalpha(alpha)
        return image
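All three branches wrap ImageOps.equalize: only the gray band is equalized, and the alpha band is split off first and reattached afterwards so the mask survives. A minimal sketch of the 'all' branch in isolation (Python 3, Pillow; the "LA" conversion is an assumption for illustration):

from PIL import ImageOps

def equalize_keep_alpha(img):
    """Histogram-equalize the gray band of an image while preserving its alpha band."""
    gray, alpha = img.convert("LA").split()
    equalized = ImageOps.equalize(gray)
    equalized.putalpha(alpha)           # reattach the original alpha channel
    return equalized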
Example #9
  def process(self, image):
    """
    @param image -- The image to process.

    Returns a single image, or a list containing one or more images.
    """

    BaseFilter.process(self, image)

    if self.mode != 'gray':
      raise RuntimeError("EqualizeHistogram only supports grayscale images.")

    if self.region == 'bbox':
      bbox = image.split()[1].getbbox()
      croppedImage = image.crop(bbox)
      croppedImage.load()
      alpha = croppedImage.split()[1]
      croppedImage = ImageOps.equalize(croppedImage.split()[0])
      croppedImage.putalpha(alpha)
      image.paste(croppedImage, bbox)
    elif self.region == 'mask':
      bbox = image.split()[1].getbbox()
      croppedImage = image.crop(bbox)
      croppedImage.load()
      alpha = croppedImage.split()[1]
      # Fill in the part of the cropped image outside the mask with
      # uniformly-distributed noise
      noiseArray = \
        numpy.random.randint(0, 255, croppedImage.size[0]*croppedImage.size[1])
      noiseImage = Image.new('L', croppedImage.size)
      noiseImage.putdata([uint(p) for p in noiseArray])
      compositeImage = Image.composite(croppedImage, noiseImage, alpha)
      # Equalize the composite image
      compositeImage = ImageOps.equalize(compositeImage.split()[0])
      # Paste the part of the equalized image within the mask back
      # into the cropped image
      croppedImage = Image.composite(compositeImage, croppedImage, alpha)
      croppedImage.putalpha(alpha)
      # Paste the cropped image back into the full image
      image.paste(croppedImage, bbox)
    elif self.region == 'all':
      alpha = image.split()[1]
      image = ImageOps.equalize(image.split()[0])
      image.putalpha(alpha)
    return image
Example #10
  def process(self, image):
    """
    @param image -- The image to process.

    Returns a single image, or a list containing one or more images.
    """
    BaseFilter.process(self, image)
    #Create numpy array from image grayscale data and resize to image dimensions
    imageArray = numpy.array(image.split()[0].getdata())
    imageArray.resize(image.size[1], image.size[0])
    #Calculate offset from difficulty level
    offset = self.difficulty*(self.maxOffset)
    #Add random change to offset within window size
    halfWindowSize = 0.1*offset
    offset = (offset - halfWindowSize) + halfWindowSize*self.random.random()*((-1)**self.random.randint(1, 2))
    #Apply random direction
    offset *= ((-1)**self.random.randint(1, 2))
    imageArray += offset
    #Recreate PIL image
    newImage = Image.new("L", image.size)
    newImage.putdata([uint(p) for p in imageArray.flatten()])
    newImage.putalpha(image.split()[1])
    return newImage
Example #11
    def _processImage(self, image, bbox):
        """Return a single image, or a list containing one or more images.

    @param image -- The image to process.
    """
        BaseFilter.process(self, image)

        inWidth, inHeight = image.size

        # Get output dims and buffers (cached)
        outWidth, outHeight, inBuffer, outBuffer, mask = self._prepare(
            image.size)

        # Ask the sub-class to build the filter bank
        self._buildFilterBank()

        inputOffset = 0
        outputOffset = 0

        data = image.split()[0]
        inputVector = numpy.asarray(data, dtype=numpy.float32)

        inputVector.shape = (inHeight, inWidth)

        # If we are using "color-key" mode, then detect the value of
        # the upper-left pixel and use it as the value of
        # 'offImagePixelValue'
        if self._offImagePixelValue in ('colorKey', u'colorKey'):
            offImagePixelValue = inputVector[0, 0]
        else:
            offImagePixelValue = self._offImagePixelValue

        result = []

        # Compute the convolution responses

        # Determine proper input/output dimensions
        outputSize = outHeight * outWidth * self._outputPlaneCount

        # Locate correct portion of output
        outputVector = numpy.zeros(
            (outHeight, outWidth, self._outputPlaneCount), dtype=numpy.float32)
        outputVector.shape = (self._outputPlaneCount, outHeight, outWidth)

        # Compute the bounding box to use for our C implementation
        imageBox = numpy.array([0, 0, inWidth, inHeight], dtype=numpy.int32)

        ## --- DEBUG CODE ----
        #global id
        #o = inputVector
        #f = os.path.abspath('convolution_input_%d.txt' % id)
        #print f
        #numpy.savetxt(f, o)
        #id += 1
        ##from dbgp.client import brk; brk(port=9019)
        ## --- DEBUG CODE END ----

        # Call the fast convolution C code
        self._convolve(inputVector, bbox, imageBox, outputVector,
                       offImagePixelValue, inBuffer, outBuffer)

        outputVector = numpy.rollaxis(outputVector, 0, 3)
        outputVector = outputVector.reshape(outWidth * outHeight,
                                            self._outputPlaneCount).flatten()
        assert outputVector.dtype == numpy.float32

        locationCount = len(outputVector) / self._outputPlaneCount
        response = outputVector.reshape(locationCount, self._outputPlaneCount)

        ## --- DEBUG CODE ----
        #global id
        #o = outputVector.flatten()
        ##print outputVector.shape, len(o)
        #f = os.path.abspath('convolution_output_%d.txt' % id)
        #print f
        #numpy.savetxt(f, o)
        #id += 1
        ##from dbgp.client import brk; brk(port=9019)
        ## --- DEBUG CODE END ----

        # Convert the responses to images
        result = []
        for i in range(response.shape[1]):
            newImage = Image.new('L', (outWidth, outHeight))
            #data = (self._gainConstant * 255.0 * response[:,i]).clip(min=0.0, max=255.0).astype(numpy.uint8)
            data = (255.0 * response[:, i]).clip(min=0.0,
                                                 max=255.0).astype(numpy.uint8)
            newImage.putdata([uint(p) for p in data])
            newImage.putalpha(mask)
            result.append(newImage)

        return (result, outputVector)
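After the C convolution fills outputVector, the tail of this method reshapes the planes, clips each response to [0, 255], and wraps it in an "L" image with the cached mask as alpha. A rough standalone sketch of that pipeline using scipy.ndimage in place of nupic's C routine (the example kernels are illustrative assumptions, not the filter bank built by _buildFilterBank):

import numpy as np
from PIL import Image
from scipy import ndimage

def convolve_to_images(img, kernels):
    """Return one PIL "L" image per kernel, holding the clipped convolution response."""
    arr = np.asarray(img.convert("L"), dtype=np.float32)
    images = []
    for kernel in kernels:
        response = ndimage.convolve(arr, kernel, mode='constant', cval=0.0)
        # Clip each response plane to the 8-bit range and convert it to an image.
        images.append(Image.fromarray(np.clip(response, 0.0, 255.0).astype(np.uint8)))
    return images

# Hypothetical usage with a pair of edge kernels:
# sobel_x = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]], dtype=np.float32)
# responses = convolve_to_images(Image.open("input.png"), [sobel_x, sobel_x.T])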
Example #12
  def _processImage(self, image, bbox):
    """Return a single image, or a list containing one or more images.

    @param image -- The image to process.
    """
    BaseFilter.process(self, image)

    inWidth, inHeight = image.size

    # Get output dims and buffers (cached)
    outWidth, outHeight, inBuffer, outBuffer, mask = self._prepare(image.size)

    # Ask the sub-class to build the filter bank
    self._buildFilterBank()

    inputOffset  = 0
    outputOffset = 0

    data = image.split()[0]
    inputVector = numpy.asarray(data, dtype=numpy.float32)

    inputVector.shape = (inHeight, inWidth)

    # If we are using "color-key" mode, then detect the value of
    # the upper-left pixel and use it as the value of
    # 'offImagePixelValue'
    if self._offImagePixelValue in ('colorKey', u'colorKey'):
      offImagePixelValue = inputVector[0, 0]
    else:
      offImagePixelValue = self._offImagePixelValue

    result = []

    # Compute the convolution responses

    # Determine proper input/output dimensions
    outputSize = outHeight * outWidth * self._outputPlaneCount

    # Locate correct portion of output
    outputVector = numpy.zeros((outHeight,
                                outWidth,
                                self._outputPlaneCount),
                                dtype=numpy.float32)
    outputVector.shape = (self._outputPlaneCount, outHeight, outWidth)


    # Compute the bounding box to use for our C implementation
    imageBox = numpy.array([0, 0, inWidth, inHeight], dtype=numpy.int32)

    ## --- DEBUG CODE ----
    #global id
    #o = inputVector
    #f = os.path.abspath('convolution_input_%d.txt' % id)
    #print f
    #numpy.savetxt(f, o)
    #id += 1
    ##from dbgp.client import brk; brk(port=9019)
    ## --- DEBUG CODE END ----

    # Call the fast convolution C code
    self._convolve(inputVector,
                   bbox,
                   imageBox,
                   outputVector,
                   offImagePixelValue,
                   inBuffer,
                   outBuffer)

    outputVector = numpy.rollaxis(outputVector, 0, 3)
    outputVector = outputVector.reshape(outWidth * outHeight,
                                        self._outputPlaneCount).flatten()
    assert outputVector.dtype == numpy.float32

    locationCount = len(outputVector) / self._outputPlaneCount
    response = outputVector.reshape(locationCount, self._outputPlaneCount)

    ## --- DEBUG CODE ----
    #global id
    #o = outputVector.flatten()
    ##print outputVector.shape, len(o)
    #f = os.path.abspath('convolution_output_%d.txt' % id)
    #print f
    #numpy.savetxt(f, o)
    #id += 1
    ##from dbgp.client import brk; brk(port=9019)
    ## --- DEBUG CODE END ----

    # Convert the responses to images
    result = []
    for i in range(response.shape[1]):
      newImage = Image.new('L', (outWidth, outHeight))
      #data = (self._gainConstant * 255.0 * response[:,i]).clip(min=0.0, max=255.0).astype(numpy.uint8)
      data = (255.0 * response[:,i]).clip(min=0.0, max=255.0).astype(numpy.uint8)
      newImage.putdata([uint(p) for p in data])
      newImage.putalpha(mask)
      result.append(newImage)

    return (result, outputVector)