Example #1
    def process(self, data):

        # only works with 2d, scalar data
        if Image.is_data_2d(data) and Image.is_data_scalar_type(data):

            # make a copy of the data so that other threads can use data while we're processing
            # otherwise numpy puts a lock on the data.
            data_copy = data.copy()

            # grab our parameters. ideally this could just access the member variables directly,
            # but it doesn't work that way (yet).
            sigma1 = self.get_property("sigma1")
            sigma2 = self.get_property("sigma2")
            weight2 = self.get_property("weight2")

            # first calculate the FFT
            fft_data = scipy.fftpack.fftshift(scipy.fftpack.fft2(data_copy))

            # next, set up xx, yy arrays to be linear indexes for x and y coordinates ranging
            # from -width/2 to width/2 and -height/2 to height/2.
            yy_min = int(math.floor(-data.shape[0] / 2))
            yy_max = int(math.floor(data.shape[0] / 2))
            xx_min = int(math.floor(-data.shape[1] / 2))
            xx_max = int(math.floor(data.shape[1] / 2))
            xx, yy = numpy.meshgrid(numpy.linspace(xx_min, xx_max, data.shape[1]),
                                    numpy.linspace(yy_min, yy_max, data.shape[0]))

            # calculate the pixel distance from the center
            rr = numpy.sqrt(numpy.square(xx) + numpy.square(yy)) / (data.shape[0] * 0.5)

            # finally, apply a filter to the Fourier space data.
            filter = numpy.exp(-0.5 * numpy.square(rr / sigma1)) - (1.0 - weight2) * numpy.exp(
                -0.5 * numpy.square(rr / sigma2))
            filtered_fft_data = fft_data * filter

            # and then do invert FFT and take the real value.
            return scipy.fftpack.ifft2(scipy.fftpack.ifftshift(filtered_fft_data)).real

        else:
            # not 2-d scalar data.
            raise NotImplementedError()
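
For experimenting outside the plug-in, the same difference-of-Gaussians Fourier filter can be exercised on a plain NumPy array. Below is a minimal standalone sketch using numpy.fft; the function name and the sigma1/sigma2/weight2 defaults are illustrative assumptions, not the plug-in's actual property values.

import numpy

def dog_fourier_filter(data, sigma1=0.3, sigma2=0.05, weight2=0.5):
    """Minimal standalone sketch of the difference-of-Gaussians Fourier filter above.

    The parameter defaults are illustrative, not the plug-in's real property values.
    """
    # transform to Fourier space with the zero frequency in the center
    fft_data = numpy.fft.fftshift(numpy.fft.fft2(data))
    # coordinate grids running from roughly -size/2 to +size/2 along each axis
    yy, xx = numpy.meshgrid(numpy.linspace(-data.shape[0] / 2, data.shape[0] / 2, data.shape[0]),
                            numpy.linspace(-data.shape[1] / 2, data.shape[1] / 2, data.shape[1]),
                            indexing="ij")
    # radial distance from the center, normalized to half the image height
    rr = numpy.sqrt(numpy.square(xx) + numpy.square(yy)) / (data.shape[0] * 0.5)
    # wide Gaussian minus a weighted narrow Gaussian: a band-pass in Fourier space
    filter_array = (numpy.exp(-0.5 * numpy.square(rr / sigma1)) -
                    (1.0 - weight2) * numpy.exp(-0.5 * numpy.square(rr / sigma2)))
    # back to real space, keeping only the real part
    return numpy.fft.ifft2(numpy.fft.ifftshift(fft_data * filter_array)).real

# usage: filtered = dog_fourier_filter(numpy.random.rand(256, 256))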
Example #2
    def process(self, data):
        img = Image.create_rgba_image_from_array(data)  # inefficient since we're just converting back to gray
        if id(img) == id(data):
            img = img.copy()
        if id(img.base) == id(data):
            img = img.copy()
        img = img.view(numpy.uint8).reshape(img.shape + (4,))  # expand the packed color values into uint8 channels
        img_gray = cv2.cvtColor(img, cv2.COLOR_RGBA2GRAY)  # convert the 4-channel image to grayscale for the cascade
        img_gray = cv2.equalizeHist(img_gray)
        rects = detect(img_gray, relative_file(__file__, "haarcascade_frontalface_alt.xml"))
        draw_rects(img, rects, (0, 255, 0))
        return img
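
The detect, draw_rects, and relative_file helpers are defined elsewhere in the script and are not shown here. Assuming they wrap the usual Haar-cascade workflow, the detection step could be written directly against cv2.CascadeClassifier, roughly as in the sketch below; the function name, cascade path, and detectMultiScale parameters are illustrative assumptions.

import cv2

def detect_faces(img_rgba_uint8, cascade_path="haarcascade_frontalface_alt.xml"):
    """Sketch of the detection step using cv2.CascadeClassifier directly.

    img_rgba_uint8 is an (h, w, 4) uint8 array like the one built above;
    the cascade path and the detectMultiScale parameters are illustrative.
    """
    # grayscale + histogram equalization, as in the example above
    gray = cv2.equalizeHist(cv2.cvtColor(img_rgba_uint8, cv2.COLOR_RGBA2GRAY))
    cascade = cv2.CascadeClassifier(cascade_path)
    rects = cascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=4, minSize=(30, 30))
    # draw a green rectangle around each detection
    for (x, y, w, h) in rects:
        cv2.rectangle(img_rgba_uint8, (x, y), (x + w, y + h), (0, 255, 0), 2)
    return img_rgba_uint8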
Example #3
    def process(self, img):
        grad = np.zeros(img.shape + (3,), dtype=np.uint8)  # rgb format
        # grad will be returned at the end, then Swift will identify it as rgb and display it as such.
        w = img.shape[0]  # w and h are much shorter to read than img.shape[0] and img.shape[1]
        h = img.shape[1]
        if Image.is_data_complex_type(img):  # if it's complex, we want to show the phase data, otherwise just a color map
            ave_intensity = np.median(np.log(abs(img)))  # to see the colors in the cool parts more clearly, ignore the noise in the dark
            max_intensity = max(np.log(abs(img[0:w // 2 - 2])).max(), np.log(abs(img[w // 2 + 2:])).max(),        # not counting
                                np.log(abs(img[:, 0:h // 2 - 2])).max(), np.log(abs(img[:, h // 2 + 2:])).max())  # center pixels
            simgpx = img[list(range(1, w)) + [0]]         # shift image plus in x: a copy of the image, shifted one pixel over
            simgmx = img[[w - 1] + list(range(0, w - 1))]
            simgpy = img[:, list(range(1, h)) + [0]]
            simgmy = img[:, [h - 1] + list(range(0, h - 1))]

            nplusx = np.sqrt(1 / abs(img) + 1 / abs(simgpx))   # implicit looping lets each array be calculated in a single line
            nminusx = np.sqrt(1 / abs(img) + 1 / abs(simgmx))
            nplusy = np.sqrt(1 / abs(img) + 1 / abs(simgpy))
            nminusy = np.sqrt(1 / abs(img) + 1 / abs(simgmy))

            arcimg = np.arctan2(img.imag, img.real)  # computed once for speed, not that it helps as much as hoped
            dlambdaplusx = (np.arctan2(simgpx.imag, simgpx.real) - arcimg) % 6.2831
            dlambdaminusx = (arcimg - np.arctan2(simgmx.imag, simgmx.real)) % 6.2831
            dlambdaplusy = (np.arctan2(simgpy.imag, simgpy.real) - arcimg) % 6.2831
            dlambdaminusy = (arcimg - np.arctan2(simgmy.imag, simgmy.real)) % 6.2831

            dlambdax = (dlambdaplusx + ((dlambdaminusx - dlambdaplusx + 3.1415) % 6.2831 - 3.1415) * nplusx / (nplusx + nminusx)) % 6.2831
            dlambday = (dlambdaplusy + ((dlambdaminusy - dlambdaplusy + 3.1415) % 6.2831 - 3.1415) * nplusy / (nplusy + nminusy)) % 6.2831

            X = (dlambdax / math.pi) / 2  # the realspace location, as a number from 0 to 1
            Y = (dlambday / math.pi) / 2
            magnitude = np.log(abs(img))  # put the FFT on a log scale to see the dark parts more easily
            I = np.maximum(np.minimum((magnitude - ave_intensity) / (max_intensity - ave_intensity), 1), 0)  # intensity
            H = np.arctan2(X - 0.5, Y - 0.5)  # hue
            S = np.sqrt(np.square(X - 0.5) + np.square(Y - 0.5))  # saturation
            grad[:, :, 0] = (S * (np.cos(H) + 1) * 127.5 + (1 - S) * 127.5) * I                  # blue
            grad[:, :, 1] = (S * (np.cos(H - np.pi * 2 / 3) + 1) * 127.5 + (1 - S) * 127.5) * I  # green
            grad[:, :, 2] = (S * (np.cos(H + np.pi * 2 / 3) + 1) * 127.5 + (1 - S) * 127.5) * I  # red
        else:  # just overlay a color map onto it
            min_intensity = img.min()
            intensity_range = img.max() - min_intensity
            irow, icol = np.ogrid[0:w, 0:h]  # makes two arrays, one of size w and one of size h
            H = np.arctan2(w / 2.0 - irow, h / 2.0 - icol)  # hue map from the direction from point (w/2, h/2) to point (irow, icol)
            S = np.sqrt(np.square((irow - w / 2) * np.sqrt(2) / w) + np.square((icol - h / 2) * np.sqrt(2) / h))  # saturation
            I = (img * 1.0 - min_intensity) / intensity_range  # intensity
            grad[:, :, 0] = (S * (np.cos(H) + 1) * 127.5 + (1 - S) * 127.5) * I                  # blue
            grad[:, :, 1] = (S * (np.cos(H - np.pi * 2 / 3) + 1) * 127.5 + (1 - S) * 127.5) * I  # green
            grad[:, :, 2] = (S * (np.cos(H + np.pi * 2 / 3) + 1) * 127.5 + (1 - S) * 127.5) * I  # red
        return grad  # return an image to Swift either way, because that's what it wants
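
The four shifted arrays above are one-pixel circular shifts built from explicit index lists. A more compact and Python 3 friendly way to build equivalent arrays is numpy.roll; the sketch below is illustrative, and the function name is an assumption.

import numpy as np

def circular_shifts(img):
    """Sketch: the same one-pixel wraparound shifts as the index-list copies above."""
    simgpx = np.roll(img, -1, axis=0)  # equivalent to img[list(range(1, w)) + [0]]
    simgmx = np.roll(img, 1, axis=0)   # equivalent to img[[w - 1] + list(range(0, w - 1))]
    simgpy = np.roll(img, -1, axis=1)
    simgmy = np.roll(img, 1, axis=1)
    return simgpx, simgmx, simgpy, simgmy

# usage: simgpx, simgmx, simgpy, simgmy = circular_shifts(np.fft.fft2(np.random.rand(64, 64)))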