Example #1
    def __init__(self):
        QMainWindow.__init__(self, None,
                             "FFTLab Main Window",
                             Qt.WType_TopLevel | Qt.WDestructiveClose)

        self.file_menu = QPopupMenu(self)
        self.file_menu.insertItem('&Quit', self.file_quit, Qt.CTRL + Qt.Key_Q)
        self.menuBar().insertItem('&File', self.file_menu)

        self.help_menu = QPopupMenu(self)
        self.menuBar().insertSeparator()
        self.menuBar().insertItem('&Help', self.help_menu)

        self.help_menu.insertItem('&About', self.about)

        self.main_widget = QWidget(self, "Main widget")

        data = (lena() / 255.).astype("complex64")
        kernel = np.ones((6, 6)).astype("complex64")
        #data = np.random.uniform(0,1,(8,8)).astype("complex64")
        #kernel = np.random.uniform(0,1,(7,7)).astype("complex64")
        #power_spec = fftshift(log(abs(signal.fftn(data))))

        gpu_conv = fftconvolve2d(data,kernel)
        cpu_conv = fftconvolve(data.real, kernel.real, mode="valid")

        info("GPU shape = (%s, %s)" % gpu_conv.shape)
        info("CPU shape = (%s, %s)" % cpu_conv.shape)
        
        check_results(cpu_conv, gpu_conv)

        data_c = ImageCanvas(data.real, self.main_widget)
        kernel_c = ImageCanvas(kernel.real, self.main_widget)
        gpu_conv_c = ImageCanvas(gpu_conv, self.main_widget)
        cpu_conv_c = ImageCanvas(cpu_conv, self.main_widget)
        #power_spec = ImageCanvas(power_spec,self.main_widget)

        data_label = QLabel("Input Data (lena)", self.main_widget)
        data_label.setAlignment(QLabel.AlignCenter)
        kernel_label = QLabel("Convolution Kernel", self.main_widget)
        kernel_label.setAlignment(QLabel.AlignCenter)
        gpu_conv_label = QLabel("GPU fftconvolve (CUDA)", self.main_widget)
        gpu_conv_label.setAlignment(QLabel.AlignCenter)
        cpu_conv_label = QLabel("CPU fftconvolve (NumPy)", self.main_widget)
        cpu_conv_label.setAlignment(QLabel.AlignCenter)

        g = QGridLayout(self.main_widget)
        g.addWidget(data_label,0,0)
        g.addWidget(kernel_label,0,1)
        g.addWidget(data_c,1,0)
        g.addWidget(kernel_c,1,1)
        g.addWidget(gpu_conv_label,2,0)
        g.addWidget(cpu_conv_label,2,1)
        g.addWidget(gpu_conv_c,3,0)
        g.addWidget(cpu_conv_c,3,1)

        self.main_widget.setFocus()
        self.setCentralWidget(self.main_widget)

        self.statusBar().message("%s - v%s" % (PROGNAME, PROG_VERSION) , 2000)
def test_connect_regions_with_grid():
    lena = sp.lena()
    mask = lena > 50
    graph = grid_to_graph(*lena.shape, **{'mask': mask})
    assert_equal(ndimage.label(mask)[1], cs_graph_components(graph)[0])

    mask = lena > 150
    graph = grid_to_graph(*lena.shape, **{'mask': mask, 'dtype': None})
    assert_equal(ndimage.label(mask)[1], cs_graph_components(graph)[0])

def test_connect_regions():
    lena = sp.lena()
    for thr in (50, 150):
        mask = lena > thr
        graph = img_to_graph(lena, mask)
        nose.tools.assert_equal(ndimage.label(mask)[1],
                                cs_graph_components(graph)[0])
Example #4
    def __init__(self):
        QMainWindow.__init__(self, None, "FFTLab Main Window",
                             Qt.WType_TopLevel | Qt.WDestructiveClose)

        self.file_menu = QPopupMenu(self)
        self.file_menu.insertItem('&Quit', self.file_quit, Qt.CTRL + Qt.Key_Q)
        self.menuBar().insertItem('&File', self.file_menu)

        self.help_menu = QPopupMenu(self)
        self.menuBar().insertSeparator()
        self.menuBar().insertItem('&Help', self.help_menu)

        self.help_menu.insertItem('&About', self.about)

        self.main_widget = QWidget(self, "Main widget")

        data = (lena() / 255.).astype("complex64")
        kernel = np.ones((6, 6)).astype("complex64")
        #data = np.random.uniform(0,1,(8,8)).astype("complex64")
        #kernel = np.random.uniform(0,1,(7,7)).astype("complex64")
        #power_spec = fftshift(log(abs(signal.fftn(data))))

        gpu_conv = fftconvolve2d(data, kernel)
        cpu_conv = fftconvolve(data.real, kernel.real, mode="valid")

        info("GPU shape = (%s, %s)" % gpu_conv.shape)
        info("CPU shape = (%s, %s)" % cpu_conv.shape)

        check_results(cpu_conv, gpu_conv)

        data_c = ImageCanvas(data.real, self.main_widget)
        kernel_c = ImageCanvas(kernel.real, self.main_widget)
        gpu_conv_c = ImageCanvas(gpu_conv, self.main_widget)
        cpu_conv_c = ImageCanvas(cpu_conv, self.main_widget)
        #power_spec = ImageCanvas(power_spec,self.main_widget)

        data_label = QLabel("Input Data (lena)", self.main_widget)
        data_label.setAlignment(QLabel.AlignCenter)
        kernel_label = QLabel("Convolution Kernel", self.main_widget)
        kernel_label.setAlignment(QLabel.AlignCenter)
        gpu_conv_label = QLabel("GPU fftconvolve (CUDA)", self.main_widget)
        gpu_conv_label.setAlignment(QLabel.AlignCenter)
        cpu_conv_label = QLabel("CPU fftconvolve (NumPy)", self.main_widget)
        cpu_conv_label.setAlignment(QLabel.AlignCenter)

        g = QGridLayout(self.main_widget)
        g.addWidget(data_label, 0, 0)
        g.addWidget(kernel_label, 0, 1)
        g.addWidget(data_c, 1, 0)
        g.addWidget(kernel_c, 1, 1)
        g.addWidget(gpu_conv_label, 2, 0)
        g.addWidget(cpu_conv_label, 2, 1)
        g.addWidget(gpu_conv_c, 3, 0)
        g.addWidget(cpu_conv_c, 3, 1)

        self.main_widget.setFocus()
        self.setCentralWidget(self.main_widget)

        self.statusBar().message("%s - v%s" % (PROGNAME, PROG_VERSION), 2000)
def _downsampled_lena():
    lena = sp.lena()
    lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + \
           lena[1::2, 1::2]
    lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + \
           lena[1::2, 1::2]
    lena /= 16.0
    return lena
Example #6
def test_connect_regions_with_grid():
    lena = sp.lena()
    mask = lena > 50
    graph = grid_to_graph(*lena.shape, **{"mask": mask})
    assert_equal(ndimage.label(mask)[1], cs_graph_components(graph)[0])

    mask = lena > 150
    graph = grid_to_graph(*lena.shape, **{"mask": mask, "dtype": None})
    assert_equal(ndimage.label(mask)[1], cs_graph_components(graph)[0])
    def __init__(self):
        iris = datasets.load_iris()
        self._x_iris = iris.data
        self._y_iris = iris.target
        try:
            self._lena = sp.lena()
        except AttributeError:
            from scipy import misc
            self._lena = misc.lena()
Example #8
def test_connect_regions_with_grid():
    lena = sp.lena()
    mask = lena > 50
    graph = grid_to_graph(*lena.shape, mask=mask)
    nose.tools.assert_equal(ndimage.label(mask)[1],
                            cs_graph_components(graph)[0])

    mask = lena > 150
    graph = grid_to_graph(*lena.shape, mask=mask, dtype=None)
    nose.tools.assert_equal(ndimage.label(mask)[1],
                            cs_graph_components(graph)[0])
Example #9
def test_connect_regions_with_grid():
    lena = sp.lena()
    mask = lena > 50
    graph = grid_to_graph(*lena.shape, **{'mask' : mask})
    nose.tools.assert_equal(ndimage.label(mask)[1],
                            cs_graph_components(graph)[0])

    mask = lena > 150
    graph = grid_to_graph(*lena.shape, **{'mask' : mask, 'dtype' : None})
    nose.tools.assert_equal(ndimage.label(mask)[1],
                            cs_graph_components(graph)[0])
Example #10
def test_tvdenoise():
    lena = scipy.lena().astype(np.float)
    noisy_lena = lena + 0.2 * lena.std()*np.random.randn(*lena.shape)
    denoised_lena_W5 = tvdenoise(lena, niter=10, W=5.0)
    denoised_lena_W50 = tvdenoise(lena, niter=10, W=50.)
    grad_mag_lena = gradient_magnitude(lena).sum()
    grad_mag_noisy = gradient_magnitude(noisy_lena).sum()
    grad_mag_denoised_W5 = gradient_magnitude(denoised_lena_W5).sum()
    grad_mag_denoised_W50 = gradient_magnitude(denoised_lena_W50).sum()
    assert grad_mag_noisy > max(grad_mag_denoised_W5, grad_mag_denoised_W50)
    assert grad_mag_denoised_W5 > grad_mag_denoised_W50
    assert grad_mag_denoised_W5 > 0.5 * grad_mag_lena 
Example #11
def main():
    x = lena()

    a = 32

    y = wv.misc.per_ext2d(x, a)
    z = wv.misc.symm_ext2d(x, a)

    plt.subplot(2, 1, 1)
    plt.imshow(y, cmap=cm.gray)
    plt.xlabel('Periodic Ext')

    plt.subplot(2, 1, 2)
    plt.imshow(z, cmap=cm.gray)
    plt.xlabel('Symmetric Ext')

    plt.show()
def unsupervisedLearningTest02():
	from sklearn import cluster
	import scipy as sp
	import numpy as np
	try:
		lena = sp.lena()
	except AttributeError:
		from scipy import misc
		lena = misc.lena()

	X = lena.reshape((-1, 1))
	k_means = cluster.KMeans(n_clusters = 5, n_init = 1)
	k_means.fit(X)
	values = k_means.cluster_centers_.squeeze()
	labels = k_means.labels_
	lena_compressed = np.choose(labels, values)
	lena_compressed.shape = lena.shape

	print lena_compressed
    def test_tv_denoise_2d(self):
        """
        Apply the TV denoising algorithm on the lena image provided
        by scipy
        """
        import scipy
        # lena image
        lena = scipy.lena().astype(np.float)
        # add noise to lena
        lena += 0.5 * lena.std() * np.random.randn(*lena.shape)
        # denoise
        denoised_lena = F.tv_denoise(lena, weight=60.0)
        # which dtype?
        assert denoised_lena.dtype in [np.float, np.float32, np.float64]
        from scipy import ndimage
        grad = ndimage.morphological_gradient(lena, size=(3, 3))
        grad_denoised = ndimage.morphological_gradient(denoised_lena,
                                                       size=(3, 3))
        # test if the total variation has decreased
        assert np.sqrt((grad_denoised**2).sum()) < np.sqrt((grad**2).sum()) / 2
        denoised_lena_int = F.tv_denoise(lena.astype(np.int32),
                                         weight=60.0, keep_type=True)
        assert denoised_lena_int.dtype is np.dtype('int32')
Example #14
    def test_tv_denoise_2d(self):
        """
        Apply the TV denoising algorithm on the lena image provided
        by scipy
        """
        import scipy
        # lena image
        lena = scipy.lena().astype(np.float)
        # add noise to lena
        lena += 0.5 * lena.std() * np.random.randn(*lena.shape)
        # denoise
        denoised_lena = F.tv_denoise(lena, weight=60.0)
        # which dtype?
        assert denoised_lena.dtype in [np.float, np.float32, np.float64]
        from scipy import ndimage
        grad = ndimage.morphological_gradient(lena, size=(3, 3))
        grad_denoised = ndimage.morphological_gradient(denoised_lena,
                                                       size=(3, 3))
        # test if the total variation has decreased
        assert np.sqrt((grad_denoised**2).sum()) < np.sqrt((grad**2).sum())
        denoised_lena_int = F.tv_denoise(lena.astype(np.int32),
                                         weight=60.0, keep_type=True)
        assert denoised_lena_int.dtype is np.dtype('int32')
Example #15
            for key in paths:
                shift = numpy.zeros(2)
                count = min(paths[key]["count"])
                for npa in paths[key]["shift"]:
                    shift += npa
                d.append({"path": key, "shift": shift, "count": count})
            d.sort(mysort)
        return d

if __name__ == "__main__":
    #lena1 = numpy.zeros((512, 512))
    #scipy.lena()
    #lena1[100:150, 160:200] = 1
    ao1, ao2 = 5, 3
    print("Absolute offset is %s,%s" % (ao1, ao2))
    lena1 = scipy.lena()
    lena2 = numpy.zeros_like(lena1)
    lena2[ao1:, ao2:] = lena1[:-ao1, :-ao2]
    #    out = Visual_SURF(lena1, lena2)
    """
    out = feature.surf2(lena1, lena2, verbose=1)
    print "clacShift", calcShift(out)

#    raw_input("Enter to continue")
    out2 = feature.reduce_orsa(out)
#    print "SURF: %s keypoint; ORSA -> %s" % (out.shape[0], out2.shape[0])
#    out = out2
    print "*" * 80
#    out = feature.sift2(lena1, lena2, verbose=1)
    out = Visual_SIFT(lena1, lena2)
    print "clacShift", calcShift(out)
import numpy as np
import scipy as sp
import pylab as pl
from scipy import ndimage, signal
l = sp.lena()[200:-140, 190:-150]
l = l/float(l.max())
pl.figure(figsize=(12, 4.5))
pl.axes([0.15, 0, 0.3, 1])
pl.gray()
pl.imshow(l, vmin=0, vmax=1)
pl.title('Ground truth')
pl.axis('off')
pl.axes([0.5, 0, 0.3, 1])
g = l + .13*np.random.normal(size=l.shape)
pl.imshow(g, vmin=0, vmax=1)
pl.title('Noisy observation')
pl.axis('off')

Example #17
import numpy as np
import scipy as sp

import harris

im = sp.lena()
harrisim = harris.compute_harris_response(im)
filtered_coords = harris.get_harris_points(harrisim, 6)
harris.plot_harris_points(im, filtered_coords)

Example #18
# Read in the lena image and use an averaging filter
# to "smooth" the image.  Use a "5 point stencil" where
# you average the current pixel with its neighboring pixels
#
#               0 0 0 0 0 0 0
#               0 0 0 x 0 0 0
#               0 0 x x x 0 0
#               0 0 0 x 0 0 0
#               0 0 0 0 0 0 0
#
# Plot the image, the smoothed image, and the difference between the
# two.
#
# Bonus: Re-filter the image by passing the result image
#        through the filter again.  Do this 50 times and plot
#        the resulting image.

from scipy import lena
from matplotlib.pylab import subplot, imshow, title, show, gray, cm

img = lena()
imshow(img, cmap=cm.gray)
show()
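
# Not part of the original example: a minimal sketch of the 5-point
# averaging described in the comments above, using plain NumPy slicing.
# One pass averages each interior pixel with its four neighbours; the
# bonus question simply repeats the same pass 50 times.
avg = img.astype(float)
for _ in range(50):
    avg[1:-1, 1:-1] = (avg[1:-1, 1:-1]     # center
                       + avg[:-2, 1:-1]    # up
                       + avg[2:, 1:-1]     # down
                       + avg[1:-1, :-2]    # left
                       + avg[1:-1, 2:]) / 5.0

subplot(1, 3, 1); imshow(img, cmap=cm.gray); title('original')
subplot(1, 3, 2); imshow(avg, cmap=cm.gray); title('filtered x50')
subplot(1, 3, 3); imshow(img - avg, cmap=cm.gray); title('difference')
show()
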
Example #19
import scipy as sp
import numpy as np
import pylab as pl

l = sp.lena()
l = l[235:235 + 153, 205:162 + 205]

t = pl.imread('tarek.jpg')
t = t[::-1, ...]
t = t.sum(axis=-1)

pl.figure()
pl.imshow(t, cmap=pl.cm.gray)
pl.axis('off')

pl.figure()
pl.imshow(l, cmap=pl.cm.gray)
pl.axis('off')

t = t.astype(np.float)
t /= t.max()

l = l.astype(np.float)
l /= l.max()

pl.figure()
pl.imshow(t + l, cmap=pl.cm.gray)
pl.axis('off')
print __doc__

from time import time

import pylab as pl
import scipy as sp
import numpy as np

from sklearn.decomposition import DictionaryLearningOnline
from sklearn.feature_extraction.image import extract_patches_2d
from sklearn.feature_extraction.image import reconstruct_from_patches_2d

###############################################################################
# Load Lena image and extract patches

lena = sp.lena() / 256.0

# downsample for higher speed
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
lena /= 16.0
height, width = lena.shape

# Distort the right half of the image
print 'Distorting image...'
distorted = lena.copy()
distorted[:, height / 2:] += 0.075 * np.random.randn(width, height / 2)

# Extract all clean patches from the left half of the image
print 'Extracting clean patches...'
t0 = time()
                   0 0 x x x 0 0
                   0 0 0 x 0 0 0 
                   0 0 0 0 0 0 0
    
    Once you have a numpy expression that works correctly, time it
    using time.time (or time.clock on windows).
    
    Use scipy.weave.blitz to run the same expression.  Again time it.
    
    Compare the speeds of the two functions and calculate the speed-up
    (numpy_time/weave_time).
    
    Plot two images that result from the two approaches and compare them.
"""

import time
from numpy import empty, float64
from scipy import lena
from scipy import weave
from matplotlib.pylab import subplot, imshow, title, show, gray, figure

img = lena()

expr = """avg_img =(  img[1:-1 ,1:-1]  # center
                    + img[ :-2 ,1:-1]  # up
                    + img[2:   ,1:-1]  # down
                    + img[1:-1 , :-2]  # left
                    + img[1:-1 ,2:  ]  # right
                    ) / 5.0"""
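
# Not part of the original snippet: a hedged sketch of the timing
# comparison the exercise describes.  weave.blitz writes into an
# existing array, so avg_img is pre-allocated here (an assumption of
# this sketch, not something the original code shows).
img = img.astype(float64)
avg_img = empty((img.shape[0] - 2, img.shape[1] - 2), float64)

t0 = time.time()
exec expr                        # plain NumPy evaluation of the expression
numpy_time = time.time() - t0

weave.blitz(expr)                # first call compiles the extension
t0 = time.time()
weave.blitz(expr)
weave_time = time.time() - t0

print "numpy: %f s   blitz: %f s   speed-up: %.1fx" % (
    numpy_time, weave_time, numpy_time / weave_time)

figure(); gray()
subplot(1, 2, 1); imshow(img); title('original')
subplot(1, 2, 2); imshow(avg_img); title('5-point average')
show()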

#!/usr/bin/env python
import numpy as np
import scipy
import linear_operators as lo

# Load the infamous Lena image from scipy
im = scipy.lena()
im = im[::4, ::4]
# Generate a convolution model with a 7x7 uniform kernel
model = lo.convolve_fftw3(im.shape, np.ones((7, 7)))
# convolve the original image
data = model * im.ravel()
# add noise to the convolved data
data += 1e0 * np.random.randn(*data.shape)
# define smoothness prior
#prior = lo.concatenate([lo.diff(im.shape, axis=i) for i in xrange(im.ndim)])
prior = lo.concatenate([lo.diff(im.shape, axis=i) for i in xrange(im.ndim)]
                       + [lo.wavelet2(im.shape, "haar"),])
# generate algorithm
algo = lo.DoubleLoopAlgorithm(model, data, prior)
# start the estimation algorithm
xe = algo()
# reshape the output as the algorithm only handles vectors
xe.resize(im.shape)
Example #23
    # TODO: simpler imshow, without complete GUI
    return _advanced_imshow(im, flip=flip, mgr=mgr)

def _advanced_imshow(im, flip=None, mgr=None):
    return AdvancedImageViewerApp(im, flip=flip, mgr=mgr)

if __name__ == "__main__":

    from scikits.image.filter import tvdenoise
    from scikits.image.io import imread, imshow
    import numpy.random as npr
    import os, os.path
    import sys

    app = QApplication(sys.argv)

    if len(sys.argv) > 1:
        image = imread(sys.argv[1])
    else:
        import scipy
        image = scipy.lena()

    flip = None
    if len(sys.argv) > 2:
        flip = imread(sys.argv[2])

    viewer = _advanced_imshow(image, flip=flip, mgr=None)
    viewer.show()

    sys.exit(app.exec_())
Example #24
    # select the best points taking min_distance into account
    filtered_coords = []
    for i in index:
        if allowed_locations[coords[i][0]][coords[i][1]] == 1:
            filtered_coords.append(coords[i])
            allowed_locations[
              (coords[i][0] - min_distance):(coords[i][0] + min_distance),
              (coords[i][1] - min_distance):(coords[i][1] + min_distance)] = 0

    return filtered_coords


def plot_harris_points(image, filtered_coords):
    """ plots corners found in image"""

    pyplot.subplot(111)
    pyplot.imshow(image)
    pyplot.plot([p[1] for p in filtered_coords],
                [p[0] for p in filtered_coords],
                '*')
    pyplot.axis('off')
    pyplot.show()


if __name__ == '__main__':
    import scipy as sp
    im = sp.lena().astype(float)
    harrisim = compute_harris_response(im)
    filtered_coords = get_harris_points(harrisim, 6)
    plot_harris_points(im, filtered_coords)
Example #25
def _downsampled_lena():
    lena = sp.lena()
    lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
    lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
    lena /= 16.0
    return lena
print __doc__

from time import time

import pylab as pl
import scipy as sp
import numpy as np

from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.feature_extraction.image import extract_patches_2d
from sklearn.feature_extraction.image import reconstruct_from_patches_2d

###############################################################################
# Load Lena image and extract patches

lena = sp.lena() / 256.0

# downsample for higher speed
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
lena /= 4.0
height, width = lena.shape

# Distort the right half of the image
print 'Distorting image...'
distorted = lena.copy()
distorted[:, height / 2:] += 0.075 * np.random.randn(width, height / 2)

# Extract all clean patches from the left half of the image
print 'Extracting clean patches...'
t0 = time()
patch_size = (7, 7)
Example #27
                shift = numpy.zeros(2)
                count = min(paths[key]["count"])
                for npa in paths[key]["shift"]:
                    shift += npa
                d.append({"path": key, "shift": shift, "count": count})
            d.sort(mysort)
        return d


if __name__ == "__main__":
    # lena1 = numpy.zeros((512, 512))
    # scipy.lena()
    # lena1[100:150, 160:200] = 1
    ao1, ao2 = 5, 3
    print ("Absolute offset is %s,%s" % (ao1, ao2))
    lena1 = scipy.lena()
    lena2 = numpy.zeros_like(lena1)
    lena2[ao1:, ao2:] = lena1[:-ao1, :-ao2]
    #    out = Visual_SURF(lena1, lena2)
    """
    out = feature.surf2(lena1, lena2, verbose=1)
    print "clacShift", calcShift(out)

#    raw_input("Enter to continue")
    out2 = feature.reduce_orsa(out)
#    print "SURF: %s keypoint; ORSA -> %s" % (out.shape[0], out2.shape[0])
#    out = out2
    print "*" * 80
#    out = feature.sift2(lena1, lena2, verbose=1)
    out = Visual_SIFT(lena1, lena2)
    print "clacShift", calcShift(out)
Example #28
import numpy as np
import scipy
from scipy import ndimage
import matplotlib.pyplot as plt

lena = scipy.lena()
lx, ly = lena.shape
# Cropping
crop_lena = lena[lx / 4:-lx / 4, ly / 4:-ly / 4]
# up <-> down flip
flip_ud_lena = np.flipud(lena)
# rotation
rotate_lena = ndimage.rotate(lena, 45)
rotate_lena_noreshape = ndimage.rotate(lena, 45, reshape=False)

plt.figure(figsize=(12.5, 2.5))

plt.subplot(151)
plt.imshow(lena, cmap=plt.cm.gray)
plt.axis('off')
plt.subplot(152)
plt.imshow(crop_lena, cmap=plt.cm.gray)
plt.axis('off')
plt.subplot(153)
plt.imshow(flip_ud_lena, cmap=plt.cm.gray)
plt.axis('off')
plt.subplot(154)
plt.imshow(rotate_lena, cmap=plt.cm.gray)
plt.axis('off')
plt.subplot(155)
plt.imshow(rotate_lena_noreshape, cmap=plt.cm.gray)
import scipy as sp
import numpy as np
import pylab as pl

l = sp.lena()
l_ = l[235:235+153, 205:162+205]

t = pl.imread('tarek.jpg')
t = t[::-1, ...]
t_ = t.sum(axis=-1)

################################################################################
pl.figure(0, figsize=(12, 4.5))
pl.gray()
pl.clf()
pl.axes([0, 0, 0.3, 1])
pl.imshow(t_.copy())
pl.axis('off')
pl.axes([0.33, 0, 0.3, 1])
pl.imshow(l_.copy())
pl.axis('off')

t_ = t_.astype(np.float)
t_ /= t_.max()

l_ = l_.astype(np.float)
l_ /= l_.max()

pl.axes([0.66, 0, 0.3, 1])
pl.imshow(t_ + l_)
pl.axis('off')
from numpy import float32
from pylab import imshow, figure, show
from scipy import signal, lena

figure()
lena = lena()
lena = lena.astype(float32)
imshow(lena)

figure()
fl = signal.medfilt2d(lena,[15,15])
imshow(fl)

show()
Example #31
    while len(stack)>0:
      p=stack.pop()
      for pp in map(lambda x:(min(max(x[0]+p[0],0),h-1)*w+min(max(x[1]+p[1],0),w-1)) ,footprintp):
        p2=(pp//w,pp%w)
        if (img[p2]==img[p]) and L[p2]<0:
          assert(L[p]>=0)
          L[p2]=L[p]
          stack.append(p2)
  for x in imgsrt:
    p=(x//w,x%w)
    if L[p]<0:
      #surroundlabels=LR.take(map(lambda x:(min(max(x[0]+p[0],0),h-1)*w+min(max(x[1]+p[1],0),w-1)) ,footprintp))
      surroundlabels=collect_around(p)
      surroundlabels=filter(lambda x:x>=0,surroundlabels)
      if len(set(surroundlabels))<=1:
        if len(surroundlabels)==0:
          L[p]=clabel
          propagate_around(p)
          clabel+=1
        else:
          L[p]=surroundlabels[0]
          propagate_around(p)
      else:
        L[p]=random.choice(surroundlabels)
        propagate_around(p)
  return L

if __name__=="__main__":
  r=watershed(scipy.ndimage.distance_transform_edt(scipy.lena()[::4,::4]>128).astype(numpy.uint8),F1)
  pylab.clf();pylab.imshow(r);pylab.show()
Example #32
                    return self.splines.contents

    def __init__(self):
        self.opts = at_fitting_opts_new()
        self.output_opts = at_output_opts_new()

    def trace(self, img):
        ab = NumPy2AtBitmap(img)
        splines = at_splines_new(ab,
                                 self.opts,
                                 at_msg_func(),
                                 at_address())
        at_bitmap_free(ab)
        return AutoTrace.AutoTraceResult(splines)

    def output(self, filename, tracing):
        a = at_output_get_handler_by_suffix(c_char_p(filename.split('.')[-1]))
        b = _libc.fopen(filename, "wb")
        at_splines_write(a, b, "", self.output_opts,
                         tracing.splines,
                         at_msg_func(),
                         at_address())
        _libc.fclose(b)


if __name__ == "__main__":
    import scipy

    xl = scipy.lena().astype(numpy.uint8).reshape(512, 512, 1)
    at = AutoTrace()
    at.opts.contents.despeckle_level = 10
    print at.trace(xl).__dict__()

Example #33
import numpy as np
import scipy as sp
import pylab as pl
from scipy import ndimage, signal
l = sp.lena()[200:-140, 190:-150]
l = l / float(l.max())
pl.figure(figsize=(12, 4.5))
pl.axes([0.15, 0, 0.3, 1])
pl.gray()
pl.imshow(l, vmin=0, vmax=1)
pl.title('Ground truth')
pl.axis('off')
pl.axes([0.5, 0, 0.3, 1])
g = l + .13 * np.random.normal(size=l.shape)
pl.imshow(g, vmin=0, vmax=1)
pl.title('Noisy observation')
pl.axis('off')
import numpy as np
import scipy
import matplotlib.pyplot as plt

lena = scipy.lena()
lena[10:13, 20:23]
lena[100:120] = 255

lx, ly = lena.shape
X, Y = np.ogrid[0:lx, 0:ly]
mask = (X - lx/2)**2 + (Y - ly/2)**2 > lx*ly/4
lena[mask] = 0
lena[range(400), range(400)] = 255

plt.figure(figsize=(3,3))
plt.axes([0, 0, 1, 1])
plt.imshow(lena, cmap=plt.cm.gray)
plt.axis('off')

plt.show()
Example #35
def test_write_frame_image():
    img = Image.fromarray(lena()).convert("RGB")
    b = TheoraEncoder(VIDEO_DIR+"/b.ogv", img.size[0], img.size[1])
    b.write_frame_image(img)
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from sklearn import cluster, datasets
import scipy as sp
import numpy as np

try:
	lena = sp.lena()
except AttributeError:
	from scipy import misc
	lena = misc.lena()

X = lena.reshape((-1, 1)) # We need an (n_sample, n_feature) array

k_means = cluster.KMeans(n_clusters=5, n_init=1)
k_means.fit(X) 
values = k_means.cluster_centers_.squeeze()

labels = k_means.labels_
lena_compressed = np.choose(labels, values)
lena_compressed.shape = lena.shape

plt.imshow(lena_compressed, cmap = cm.Greys_r)
plt.show()
import numpy as np
import scipy
import matplotlib.pyplot as plt

l = scipy.lena()
import numpy as np
import scipy
from scipy import ndimage
import matplotlib.pyplot as plt

l = scipy.lena()
sx, sy = l.shape
X, Y = np.ogrid[0:sx, 0:sy]


r = np.hypot(X - sx/2, Y - sy/2)

rbin = (20* r/r.max()).astype(np.int)
radial_mean = ndimage.mean(l, labels=rbin, index=np.arange(1, rbin.max() +1))

plt.figure(figsize=(5,5))
plt.axes([0, 0, 1, 1])
plt.imshow(rbin, cmap=plt.cm.spectral)
plt.axis('off')

plt.show()
def plotTest02():
	import numpy as np
	import scipy as sp
	import matplotlib.pyplot as plt

	from sklearn import cluster

	n_clusters = 5
	np.random.seed(0)

	try:
		lena = sp.lena()
	except AttributeError:
		from scipy import misc
		lena = misc.lena()

	X = lena.reshape((-1, 1))
	k_means = cluster.KMeans(n_clusters = n_clusters, n_init = 4)
	k_means.fit(X)

	values = k_means.cluster_centers_.squeeze()  # this gets the cluster centers
	labels = k_means.labels_

	lena_compressed = np.choose(labels, values)
	lena_compressed.shape = lena.shape

	vmin = lena.min()
	vmax = lena.max()

	#original
	plt.figure(1, figsize = (3, 2.2))
	plt.imshow(lena, cmap = plt.cm.gray, vmin = vmin, vmax = vmax)


	#compressed data
	plt.figure(2, figsize = (3, 2.2))
	plt.imshow(lena_compressed, cmap = plt.cm.gray, vmin = vmin, vmax = vmax)

	# a few of the functions below are worth checking to understand what they do
	#equal bins lena
	regular_values = np.linspace(0, 256, n_clusters + 1)
	regular_labels = np.searchsorted(regular_values, lena) - 1
	regular_values = 0.5 * (regular_values[1:] + regular_values[:-1]) #mean
	regular_lena = np.choose(regular_labels.ravel(), regular_values)

	regular_lena.shape = lena.shape

	plt.figure(3, figsize=(3, 2.2))
	plt.imshow(regular_lena, cmap = plt.cm.gray, vmin = vmin, vmax = vmax)


	#histogram
	plt.figure(4, figsize = (3, 2.2))
	plt.clf()
	plt.axes([0.01, 0.01, 0.98, 0.98])
	plt.hist(X, bins = 256, color = '0.5', edgecolor = '.5')
	plt.yticks()
	plt.xticks(regular_values)

	values = np.sort(values)
	for center_1, center_2 in zip(values[:-1], values[1:]):
		plt.axvline(0.5 * (center_1 + center_2), color = 'b')

	for center_1, center_2 in zip(regular_values[:-1], regular_values[1:]):
		plt.axvline(0.5 * (center_1 + center_2), color = 'b', linestyle = '--')

	plt.show()
Example #40
# -*- coding: utf-8 -*-
"""
Spyder Editor

This temporary script file is located here:
C:\Users\Administrator\.spyder2\.temp.py
"""
import pylab as pl
import scipy as sp
img = sp.lena()
pl.imshow(img, cmap=pl.cm.gray)
img2 = img[:-2, 1:-1] - img[2:, 1:-1] + img[1:-1, :-2] - img[1:-1, 2:]
pl.figure()
pl.imshow(img2, cmap=pl.cm.gray)
pl.show()
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause

import numpy as np
import scipy as sp
import pylab as pl

from sklearn import cluster

n_clusters = 5
np.random.seed(0)

try:
    lena = sp.lena()
except AttributeError:
    # Newer versions of scipy have lena in misc
    from scipy import misc
    lena = misc.lena()
X = lena.reshape((-1, 1))  # We need an (n_sample, n_feature) array
k_means = cluster.KMeans(n_clusters=n_clusters, n_init=4)
k_means.fit(X)
values = k_means.cluster_centers_.squeeze()
labels = k_means.labels_

# create an array from labels and values
lena_compressed = np.choose(labels, values)
lena_compressed.shape = lena.shape

vmin = lena.min()
Example #42
#CV_FOURCC('U', '2', '6', '3') = H263 codec
#CV_FOURCC('I', '2', '6', '3') = H263I codec
#CV_FOURCC('F', 'L', 'V', '1') = FLV1 codec


class CvVideoWriter:
    def __init__(self, fname, fps=25, frameW=320, frameH=200, codec="MJPG", isColor=1):
        self.writer = cvCreateVideoWriter(fname,
                                          CV_FOURCC(codec[0], codec[1], codec[2], codec[3]),
                                          fps,
                                          cvSize(frameW, frameH),
                                          isColor)

    def push(self, img):
        cvWriteFrame(self.writer, img.copy('C'))

    def __del__(self):
        pass
        #cvReleaseVideoWriter(self.writer)

# import pycvf.lib.video.cvvideowriter as cvw; cvw.CvVideoWriter("/tmp/out1.avi")
if __name__ == "__main__":
  import numpy,scipy
  from pycvf.lib.graphics.rescale import Rescaler2d
  rsc=Rescaler2d((320,200))
  c=CvVideoWriter("/tmp/test.mpg")
  ib=scipy.lena().reshape((512,512,1)).repeat(3,axis=2)
  for i in range(100):
    print "Frame ",i
    ib[:,:,0]=i*3
    c.push(rsc.process(ib).astype(numpy.uint8))
  
  
Example #43
                  #type_converters = converters.blitz
                 )
           
      assert(not context.has_key("__inlineargs__"))
      context["__inlineargs__"]=args
      context["__inlinekwargs__"]=kwargs
      r= eval("inline(*__inlineargs__,**__inlinekwargs__)",globals(),context)
      context["__inlineargs__"]=None
      return r
    return fct
    

if __name__=="__main__":
  import time
  st=time.clock()
  lena=scipy.lena().reshape(512,512,1).repeat(3,axis=2).astype(numpy.uint8).swapaxes(0,1).copy('F')
  cimg_code("do_test( a_array );",
"""
#include <CImg.h>
using namespace cimg_library;

int do_test(PyArrayObject * npimg ) {
   assert(npimg->nd==3);
   printf("%p %d x %d x %d\\n",npimg->data,npimg->dimensions[1],npimg->dimensions[0],npimg->dimensions[2]);
    CImg<unsigned char> image(npimg->data,npimg->dimensions[1],npimg->dimensions[0],1,npimg->dimensions[2]), visu(500,400,1,3,0);
    image=image.blur(2.5);
    return 0;
}
""",True)(a=lena)
  print "done in ",time.clock()-st, "seconds";
# -*- coding: utf-8 -*-
"""
Apply an emboss (relief) effect to the image
"""
import pylab as pl
import scipy as sp
img = sp.lena()
pl.imshow(img, cmap=pl.cm.gray)
img2 = img[:-2,1:-1]-img[2:,1:-1]+img[1:-1, :-2]-img[1:-1,2:]
pl.figure()
pl.imshow(img2, cmap=pl.cm.gray)
pl.show()