def build_from_patches(patches, img_size):
    # patches: (n_patches, patch_size**2) array of flattened square patches
    patch_size = int(np.sqrt(patches.shape[1]))
    assert patch_size ** 2 == patches.shape[1], "Patches are not square"
    patches = patches.reshape((patches.shape[0], patch_size, patch_size))
    # img_size appears to be (width, height); reconstruct_from_patches_2d expects
    # (height, width), and toimage is the old scipy.misc helper (removed in SciPy 1.2)
    img = toimage(reconstruct_from_patches_2d(patches, (img_size[1], img_size[0])))
    return img
Example #2
def whiten_images(X, verbose=True, patch_size=3):
    '''X: images, shape (num_images, h, w, num_channels).'''
    h, w, c = X.shape[1:]
    for idx in range(X.shape[0]):
        if verbose and idx % 1000 == 0:
            print(idx)
        im = X[idx]
        p = image.extract_patches_2d(im, (patch_size, patch_size))
        if p.ndim < 4:
            p = p[:, :, :, None]
        p -= p.mean((1, 2))[:, None, None, :]
        im = image.reconstruct_from_patches_2d(p, (h, w, c))
        p = image.extract_patches_2d(im, (patch_size, patch_size))
        p = p.reshape(p.shape[0], -1)

        cov = p.T.dot(p)
        s, U = np.linalg.eigh(cov)
        s[s <= 0] = 0
        s = np.sqrt(s)
        ind = s < 1e-8 * s.max()  # flag near-zero singular values
        s[~ind] = 1. / np.sqrt(s[~ind])
        s[ind] = 0

        p = p.dot(U.dot(np.diag(s)).dot(U.T))
        p = p.reshape(p.shape[0], patch_size, patch_size, -1)
        X[idx] = image.reconstruct_from_patches_2d(p, (h, w, c))
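A minimal usage sketch for the snippet above. It assumes `image` is `sklearn.feature_extraction.image` (as the calls suggest) and that X has a float dtype, since the function subtracts patch means in place:

import numpy as np
from sklearn.feature_extraction import image  # name assumed by whiten_images

X = np.random.rand(5, 32, 32, 3)               # five small float RGB images
whiten_images(X, verbose=False, patch_size=3)  # whitens each image of X in place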
Example #3
def information_gain(input, target, params=None):
    """
    Builds a CNN according to ``params``. The CNN is composed of several convolutional layers
    that convert the input hyperspectral cube into the target hyperspectral cube.
    :param input: hyperspectral cube of input intensities
    :param target: hyperspectral cube of target intensities
    :param params: parameters for building the CNN
    :return: tuple
        diff = difference between target and approximation,
        approx = approximated target from input,
        model = built (and trained) CNN
    """

    if params is None:
        params = {"batch": 64, "patch_size": (64, 64)}

    patches = image.extract_patches_2d(input,
                                       params["patch_size"],
                                       max_patches=500,
                                       random_state=1234)
    targets = image.extract_patches_2d(target,
                                       params["patch_size"],
                                       max_patches=500,
                                       random_state=1234)

    model = __build_network(input.shape[2], target.shape[2], params)
    model.compile(optimizer="adam",
                  metrics=['accuracy'],
                  loss="mean_squared_error")
    model.fit(patches, targets, batch_size=params["batch"], epochs=10)

    approx_patches = model.predict(patches, batch_size=params["batch"])
    approx = image.reconstruct_from_patches_2d(approx_patches, target.shape)
    diff = target - approx
    return diff, approx, model
Example #4
def test_reconstruct_patches_perfect_color():
    face = orange_face
    p_h, p_w = 16, 16

    patches = extract_patches_2d(face, (p_h, p_w))
    face_reconstructed = reconstruct_from_patches_2d(patches, face.shape)
    np.testing.assert_array_almost_equal(face, face_reconstructed)
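These perfect-reconstruction tests pass because reconstruct_from_patches_2d averages every overlapping contribution to a pixel, and patches taken straight from extract_patches_2d all agree at each pixel, so the average recovers the original (up to float rounding, hence almost_equal for the color case). A self-contained round trip using only NumPy and scikit-learn:

import numpy as np
from sklearn.feature_extraction.image import extract_patches_2d, reconstruct_from_patches_2d

img = np.random.rand(64, 64)
patches = extract_patches_2d(img, (16, 16))       # (49 * 49, 16, 16) overlapping patches
recon = reconstruct_from_patches_2d(patches, img.shape)
np.testing.assert_array_almost_equal(img, recon)  # overlap-averaging is lossless here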
Example #5
 def reconstruct_individual_spectra(self,
                                    w=None,
                                    randomize=False,
                                    plotting=False,
                                    rectify=True,
                                    **kwargs):
     """fit each dictionary component to self.data
     inputs:
         w - per-component reconstruction weights [None=calculate weights]
         randomize - randomly permute components after getting weights [False]
         plotting - whether to subplot individual spectrum reconstructions [True]
         rectify- remove negative ("dark energy") from individual reconstructions [True]
         **kwargs - keyword arguments for plotting
     returns:
         self.X_hat_l - list of indvidual spectrum reconstructions per dictionary atom
     """
     omp_args = {}
     self.reconstruct_spectrum(w, randomize, **omp_args)
     w, components = self.w, self.D.components_
     self.X_hat_l = []
     for i in range(len(self.w.T)):
         r = np.array(
             (np.matrix(w)[:, i] * np.matrix(components)[i, :])).reshape(
                 -1, *self.patch_size)
         X_hat = reconstruct_from_patches_2d(r, self.X.shape)
         if self.log_amplitude:
             X_hat = np.exp(X_hat) - 1.0
         if rectify:  # half wave rectification
             X_hat[X_hat < 0] = 0
         self.X_hat_l.append(X_hat)
     if plotting:
         self.plot_individual_spectra(**kwargs)
Example #7
    def _adj_op(self, coeffs, atoms, dtype="array"):
        """ Adjoint operator.

        This method returns the reconstructed image from the sparse
        coefficients.

        Remark: this method only works for square patches.

        Parameters
        ----------
        coeffs: ndarray of floats,
                2d matrix dim nb_patches*nb_components,
                the sparse coefficients.
        atoms: ndarray of floats,
                2d matrix dim nb_components*nb_pixels_per_patch,
                the dictionary components.
        dtype: str, default 'array'
            if 'array' return the data as a ndarray, otherwise return a
            pysap.Image.

        Returns
        -------
        ndarray, the reconstructed data.
        """
        image = numpy.dot(coeffs, atoms)
        image = image.reshape(image.shape[0], *self.patches_shape)
        return reconstruct_from_patches_2d(image, self.img_shape)
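A standalone sketch of the same adjoint computation; the 8x8 patch shape, 32x32 image shape, and atom count are illustrative stand-ins for the class attributes, not values taken from the source:

import numpy
from sklearn.feature_extraction.image import reconstruct_from_patches_2d

nb_patches, nb_components = 625, 20           # 625 = (32 - 8 + 1) ** 2
patches_shape, img_shape = (8, 8), (32, 32)   # hypothetical shapes
coeffs = numpy.random.rand(nb_patches, nb_components)
atoms = numpy.random.rand(nb_components, 64)  # 64 pixels per 8x8 patch
image = numpy.dot(coeffs, atoms).reshape(nb_patches, *patches_shape)
recon = reconstruct_from_patches_2d(image, img_shape)  # (32, 32)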
Example #8
File: image.py, Project: bblais/Classy
def patch_vectors_to_images(origdata,verbose=True):
    from sklearn.feature_extraction.image import reconstruct_from_patches_2d

    data=Struct()
    data.DESCR="Images"
    data.target_names=origdata.target_names
    data.files=origdata.files
    data.targets=origdata.original_targets
    data.data=[]
    
    max_vector_number=len(data.targets)
    
    patch_array=[]
    for c in range(max_vector_number):
        patches=[vec.reshape(origdata.shape) 
                        for vec,i in zip(origdata.vectors,origdata.original_vector_number) if i==c]
        patch_array=np.array(patches)
    
        if origdata.overlap:
            data.data.append(reconstruct_from_patches_2d(patch_array,origdata.original_shapes[c]))
        else:
            data.data.append(reconstruct_from_patches_2d_nooverlap(patch_array,origdata.original_shapes[c]))
    
    if verbose:
        classy.datasets.summary(data)

    return data
Example #9
def recon_image_by_ElasticNet(im, q, A, lam=0.5, patch_size=8):
    """ 画像の再構成 """
    c = np.ones((A.shape[0], 1))
    Ac = np.hstack([c, A])
    recon_patches = np.dot(Ac, q.T).T.reshape((-1, patch_size, patch_size))
    recon = reconstruct_from_patches_2d(recon_patches, im.shape)
    return (im * lam + recon) / (lam + 1.)
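The column of ones prepended to A lets the first entry of each row of q act as a per-patch intercept, and lam blends the dictionary reconstruction back with the input image. A shape sketch with made-up arrays (the 8x8 patches, 50 atoms, and 17x17 image are illustrative assumptions):

import numpy as np
from sklearn.feature_extraction.image import reconstruct_from_patches_2d

A = np.random.rand(64, 50)    # dictionary: 50 atoms of 64 = 8 * 8 pixels
q = np.random.rand(100, 51)   # 100 patches, 1 intercept + 50 atom weights each
im = np.random.rand(17, 17)   # (17 - 8 + 1) ** 2 = 100 overlapping 8x8 patches
out = recon_image_by_ElasticNet(im, q, A)  # blended image, same shape as im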
Example #10
def test_reconstruct_patches_perfect_color():
    lena = orange_lena
    p_h, p_w = 16, 16

    patches = extract_patches_2d(lena, (p_h, p_w))
    lena_reconstructed = reconstruct_from_patches_2d(patches, lena.shape)
    np.testing.assert_array_equal(lena, lena_reconstructed)
Example #11
def denoise_overlapped_strides(strides=(3, 3)):  #1 2 4 11

    #print '=== OVERLAPPING PATCHES',strides,'STRIDES ==============================='
    vidcap = cv2.VideoCapture(te_noisy_video)
    fname = te_noisy_video.rsplit('/', 1)[-1][:-4]
    outfile = './outputs/uber_video_llnet.mp4'
    writer = FFmpegWriter(outfile, outputdict={'-r': 24.4})
    writer = FFmpegWriter(outfile)  # overwrites the previous writer, dropping the frame-rate setting
    i = 0
    while True:
        ret, image = vidcap.read()
        if not ret: break
        i += 1
        print("On Frame", i)
        te_noisy_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        test_set_x, te_h, te_w = load_data_overlapped_strides(
            te_dataset=te_noisy_image, patch_size=patch_size, strides=strides)
        im_ = test_set_x.get_value()
        im_noisy = im_.reshape((im_).shape[0], *patch_size)
        rec_n = im.reconstruct_from_patches_2d(im_noisy, (te_h, te_w))
        reconstructed = theano.function([],
                                        sda.logLayer.y_pred,
                                        givens={sda.x: test_set_x},
                                        on_unused_input='warn')
        result = reconstructed()
        im_recon = result.reshape((result).shape[0], *patch_size)
        rec_r = reconstruct_from_patches_with_strides_2d(im_recon,
                                                         (te_h, te_w),
                                                         strides=strides)
        writer.writeFrame(rec_r)
    writer.close()
Example #12
def test_reconstruct_patches_perfect_color():
    lena = orange_lena
    p_h, p_w = 16, 16

    patches = extract_patches_2d(lena, (p_h, p_w))
    lena_reconstructed = reconstruct_from_patches_2d(patches, lena.shape)
    np.testing.assert_array_equal(lena, lena_reconstructed)
Example #13
def test_reconstruct_patches_perfect_color():
    face = orange_face
    p_h, p_w = 16, 16

    patches = extract_patches_2d(face, (p_h, p_w))
    face_reconstructed = reconstruct_from_patches_2d(patches, face.shape)
    np.testing.assert_array_almost_equal(face, face_reconstructed)
Example #14
    def _adj_op(self, coeffs, atoms):
        """Private Adjoint operator.

        This method returns the reconstructed image from the sparse
        coefficients.

        Parameters
        ----------
        coeffs: ndarray of floats,
                2d matrix dim nb_patches*nb_components,
                the sparse coefficients.
        atoms: ndarray of floats,
                2d matrix dim nb_components*nb_pixels_per_patch,
                the dictionary components.

        Returns
        -------
        ndarray, the reconstructed data.

        Notes
        -----
        This method only works for square patches.
        """
        image = numpy.dot(coeffs, atoms)
        image = image.reshape((image.shape[0], *self.patches_shape))
        return reconstruct_from_patches_2d(image, self.img_shape)
Example #15
def test_reconstruct_patches_perfect():
    face = downsampled_face
    p_h, p_w = 16, 16

    patches = extract_patches_2d(face, (p_h, p_w))
    face_reconstructed = reconstruct_from_patches_2d(patches, face.shape)
    np.testing.assert_array_equal(face, face_reconstructed)
Example #16
def test_reconstruct_patches_perfect():
    face = downsampled_face
    p_h, p_w = 16, 16

    patches = extract_patches_2d(face, (p_h, p_w))
    face_reconstructed = reconstruct_from_patches_2d(patches, face.shape)
    np.testing.assert_array_equal(face, face_reconstructed)
Example #17
    def reconstruct_image(self,
                          path,
                          downscale_factor=None,
                          patch_size=10,
                          is_matrix=False):
        print('reconstructing given image...')
        if downscale_factor is None:
            downscale_factor = self.downscale_factor

        t0 = time()
        dims = self.get_downscaled_dims(path,
                                        downscale_factor,
                                        is_matrix=is_matrix)
        patches = self.image_to_patches(path,
                                        patch_size=patch_size,
                                        downscale_factor=downscale_factor,
                                        is_matrix=is_matrix,
                                        is_recons=True)
        self.nmf = Online_NMF(patches, self.n_components, self.iterations,
                              self.batch_size)
        code = self.nmf.sparse_code(patches, self.W)
        print('Reconstructed in %.2f seconds' % (time() - t0))
        patches_recons = np.dot(self.W, code).T
        patches_recons = patches_recons.reshape(patches_recons.shape[0],
                                                patch_size, patch_size)
        img_recons = reconstruct_from_patches_2d(patches_recons,
                                                 (dims[0], dims[1]))
        self.show_array(img_recons)
        return code
Example #18
 def decode(self, img, code):
     height, width, channels = img.shape
     patches = np.dot(code, self.dico.components_)
     patches += self.tmpIntercept
     patches = patches.reshape(len(self.tmpData), *self.patch_size)
     if self.dico.transform_algorithm == 'threshold':
         patches -= patches.min()
         patches /= patches.max()
     reconstruction = reconstruct_from_patches_2d(patches,
                                                  (height, width, channels))
     return reconstruction
Example #19
def medianFilter(matrix, dimension, P, Q):
    patches = image.extract_patches_2d(
        matrix, dimension)  # split the input matrix into overlapping patches
    new_image = []
    for patch in patches:
        new_patch = medfilt2d(patch, kernel_size=3)
        new_image.append(new_patch)
    new_image = np.asarray(new_image)
    reconstructed_image = image.reconstruct_from_patches_2d(new_image,
                                                            image_size=(P, Q))
    return reconstructed_image
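A quick usage sketch, assuming `image` is `sklearn.feature_extraction.image` and `medfilt2d` comes from `scipy.signal`; note that extracting every overlapping patch only to median-filter it is memory-hungry for large inputs:

import numpy as np
from scipy.signal import medfilt2d            # name assumed by medianFilter
from sklearn.feature_extraction import image  # name assumed by medianFilter

noisy = np.random.rand(64, 64)
smoothed = medianFilter(noisy, (8, 8), 64, 64)  # 3x3 median per patch, then overlap-averaged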
Example #20
def reconstruct_image(test_image, dico, V, patch_size = (7, 7), height=299, width=299):
    data = extract_patches_2d(test_image, patch_size)
    data = data.reshape(data.shape[0], -1)
    data -= np.mean(data, axis=0)
    data /= np.std(data, axis=0)
    dico.set_params(transform_algorithm='omp', transform_n_nonzero_coefs=1)
    code = dico.transform(data)
    patches = np.dot(code, V)
    patches = patches.reshape(len(data), *patch_size)
    reconstruction = reconstruct_from_patches_2d(patches, (height, width))
    return reconstruction
Example #21
def reconstructImages(transform_algorithms):
    reconstructions = {}
    
    for title, transform_algorithm, kwargs in transform_algorithms:
        reconstructions[title] = img2.copy()
        dico.set_params(transform_algorithm=transform_algorithm, **kwargs)
        code = dico.transform(data)
        patches = np.dot(code, V)
        patches = patches.reshape(len(data), *patch_size)
        reconstructions[title] = reconstruct_from_patches_2d(patches, img2.shape)
        
    return reconstructions
Example #22
def combine_patches_grid(in_patches, out_shape):
    '''Reconstruct an image from these `patches`

    input shape: (rows, cols, channels, patch_row, patch_col)
    '''
    num_rows, num_cols = in_patches.shape[:2]
    num_channels = in_patches.shape[-3]
    patch_size = in_patches.shape[-1]
    num_patches = num_rows * num_cols
    in_patches = np.reshape(in_patches, (num_patches, num_channels, patch_size, patch_size))  # (patches, channels, pr, pc)
    in_patches = np.transpose(in_patches, (0, 2, 3, 1)) # (patches, p, p, channels)
    recon = reconstruct_from_patches_2d(in_patches, out_shape)
    return recon.transpose(2, 1, 0)
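combine_patches_grid flattens a (rows, cols, channels, patch_row, patch_col) grid into the (n_patches, p, p, channels) layout that reconstruct_from_patches_2d expects; the final transpose(2, 1, 0) then returns a channels-first result with the two spatial axes swapped. A shape check with made-up sizes:

import numpy as np
from sklearn.feature_extraction.image import reconstruct_from_patches_2d

rows = cols = 25                               # 25 = 32 - 8 + 1 patch positions per axis
grid = np.random.rand(rows, cols, 3, 8, 8)
out = combine_patches_grid(grid, (32, 32, 3))
print(out.shape)                               # (3, 32, 32): channels first, spatial axes swapped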
Example #23
 def reconstruct_spectrum(self, w=None, randomize=False):
     data = self.data
     components = self.D.components_
     if w is None:
         self.w = self._get_approximation_coefs(data, components)
         w = self.w
     if self.standardize:
         for comp in components: comp = comp * self.std + self.mn  # note: rebinds the loop variable only; components is unchanged
     if randomize:
         components = np.random.permutation(components)
     recon = np.dot(w, components).reshape(-1,self.patch_size[0],self.patch_size[1])
     self.X_hat = reconstruct_from_patches_2d(recon, self.X.shape)
     return self
Example #24
def denoise(noisy_image, num_atoms, sparsity, patch_size):
    # patch_size = (8,8)
    patches = extract_patches_2d(noisy_image, patch_size)
    data = np.resize(patches,
                     (patches.shape[0], patch_size[0] * patch_size[1]))

    dictionary, sparse_vecs = ksvd(data, num_atoms, sparsity)

    denoised_image = dictionary.dot(sparse_vecs)
    denoised_image = np.resize(
        denoised_image, (patches.shape[0], patch_size[0], patch_size[1]))

    return reconstruct_from_patches_2d(denoised_image, noisy_image.shape)
Example #25
 def reconstruct_spectrum(self, w=None, randomize=False):
     data = self.data
     components = self.D.components_
     if w is None:
         self.w = self._get_approximation_coefs(data, components)
         w = self.w
     if self.standardize:
         for comp in components: comp = comp * self.std + self.mn  # note: rebinds the loop variable only; components is unchanged
     if randomize:
         components = np.random.permutation(components)
     recon = np.dot(w, components).reshape(-1,self.patch_size[0],self.patch_size[1])
     self.X_hat = reconstruct_from_patches_2d(recon, self.X.shape)
     return self
Example #26
def combine_patches_grid(in_patches, out_shape):
    '''Reconstruct an image from these `patches`

    input shape: (rows, cols, channels, patch_row, patch_col)
    '''
    num_rows, num_cols = in_patches.shape[:2]
    num_channels = in_patches.shape[-3]
    patch_size = in_patches.shape[-1]
    num_patches = num_rows * num_cols
    in_patches = np.reshape(in_patches, (num_patches, num_channels, patch_size, patch_size))  # (patches, channels, pr, pc)
    in_patches = np.transpose(in_patches, (0, 2, 3, 1)) # (patches, p, p, channels)
    recon = reconstruct_from_patches_2d(in_patches, out_shape)
    return recon.transpose(2, 1, 0).astype(np.float32)
Example #27
 def reconstruct_individual_spectra(self, w=None, randomize=False, plotting=False, **kwargs):
     self.reconstruct_spectrum(w,randomize)
     w, components = self.w, self.D.components_
     self.X_hat_l = []
     for i in range(len(self.w.T)):
         r=np.array((np.matrix(w)[:,i]*np.matrix(components)[i,:])).reshape(-1,self.patch_size[0],self.patch_size[1])
         self.X_hat_l.append(reconstruct_from_patches_2d(r, self.X.shape))
     if plotting:
         plt.figure()            
         for k in range(self.n_components):
             plt.subplot(self.n_components**0.5,self.n_components**0.5,k+1)
             feature_plot(self.X_hat_l[k],nofig=1,**kwargs)
     return self
Example #28
 def reconstruct_individual_spectra(self, w=None, randomize=False, plotting=False, **kwargs):
     self.reconstruct_spectrum(w,randomize)
     w, components = self.w, self.D.components_
     self.X_hat_l = []
     for i in range(len(self.w.T)):
         r=np.array((np.matrix(w)[:,i]*np.matrix(components)[i,:])).reshape(-1,self.patch_size[0],self.patch_size[1])
         self.X_hat_l.append(reconstruct_from_patches_2d(r, self.X.shape))
     if plotting:
         plt.figure()            
         for k in range(self.n_components):
             plt.subplot(self.n_components**0.5,self.n_components**0.5,k+1)
             feature_plot(self.X_hat_l[k],nofig=1,**kwargs)
     return self
Example #29
    def reconstruct_config(self, config, patch_size=20):
        print('reconstructing given configuration...')

        patches = self.array_to_patches(config)
        nmf = Online_NMF(patches)
        code = nmf.sparse_code(patches, self.W)
        patches_recons = np.dot(self.W, code).T
        patches_recons = patches_recons.reshape(patches_recons.shape[0],
                                                patch_size, patch_size)
        img = config
        img_recons = reconstruct_from_patches_2d(patches_recons,
                                                 (img.shape[0], img.shape[1]))
        self.show_array(img_recons)
        return code
Example #30
def run(args):
    filename = args['<filename>']
    directoryname = args['<directoryname>']

    try:
        with open(directoryname + 'compression.matrix', 'r') as infile:
            compression_matrix = pickle.load(infile)
    except:
        print 'Compression matrix not found. Create the compression matrix with'
        print 'train_compression.py and pass the location as argument 2 to this script.'

    print 'Resizing image...'
    image = transform.resize(color.rgb2grey(image_io.imread(filename)),
                             IMAGE_SIZE)

    print 'Saving resized, uncompressed image...'
    image_io.imsave('original.jpg', transform.resize(image, (600, 800)))

    print 'Blocking image...'
    image_blocks = get_blocks(image)

    print 'Vectorizing blocks...'
    image_vects = [
        matrix(block_to_vect(image_blocks[i]))
        for i in range(0, len(image_blocks))
    ]

    print 'Compressing block vectors...'
    compressed_vects = [
        compress_vect(image_vects[i], compression_matrix)
        for i in range(0, len(image_vects))
    ]

    print 'Compression ratio = ' + str(
        float(len(compressed_vects[0][0])) / float(len(image_vects[0])))

    print 'Decompressing blocks...'
    decomp_blocks = array([
        recover_block(dot(compression_matrix, compressed_vects[i].transpose()))
        for i in range(0, len(compressed_vects))
    ])

    print 'Saving image...'

    new_image = reconstruct_from_patches_2d(decomp_blocks, IMAGE_SIZE)

    new_image_big = transform.resize(new_image, (600, 800))

    image_io.imsave('compressed.' + str(len(compressed_vects[0][0])) + '.jpg',
                    new_image_big)
Example #31
def inverse_patch_transform(data, shape):
    """Transforms data from array of the form (w**2, N) or (w**2*l, N) where w
    is the patch width, l is the number of bands (in the case of 3D data) and
    N is the number of patches into 2D/3D array.

    Arguments
    ---------
    data: (w**2, N) or (w**2*l, N) numpy array
        The input data.
    shape: (m, n) or (m, n, l)
        The image shape.

    Returns
    -------
    ref: (m, n) or (m, n, l) numpy array
        The input image.
    """
    N = data.shape[1]

    if len(shape) == 2:

        w = int(np.sqrt(data.shape[0]))

        data_r = data.T.reshape((N, w, w))
        return image.reconstruct_from_patches_2d(data_r, shape)

    elif len(shape) == 3:

        B = shape[2]
        w = int(np.sqrt(data.shape[0] / B))

        data_r = data.T.reshape((N, w, w, B))
        return image.reconstruct_from_patches_2d(data_r, shape)

    else:
        raise ValueError('Invalid length for shape.')
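A round-trip check for the 2D branch, assuming `np` is NumPy and `image` is `sklearn.feature_extraction.image`, as the function body implies:

import numpy as np
from sklearn.feature_extraction import image

ref = np.random.rand(20, 20)
patches = image.extract_patches_2d(ref, (5, 5))  # (256, 5, 5)
data = patches.reshape(patches.shape[0], -1).T   # (25, 256): the (w**2, N) layout
rec = inverse_patch_transform(data, ref.shape)
np.testing.assert_array_almost_equal(rec, ref)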
Example #32
    def denoise(self, image_file, out_image_file):
        img = util.img_as_float(io.imread(image_file))
        patches = image.extract_patches_2d(img, self.patch_size)
        signals = patches.reshape(patches.shape[0], -1)
        mean = np.mean(signals, axis=1)[:, np.newaxis]
        signals -= mean

        ksvd = KSVD(k_atoms=32, num_iterations=10, tolerance=0.000001)
        D, X = ksvd.run(signals[:self.optimal_fit_size].T)

        X = ksvd.sparse_coding(D, signals.T)

        reduced = (D.dot(X)).T + mean
        reduced_img = image.reconstruct_from_patches_2d(
            reduced.reshape(patches.shape), img.shape)
        io.imsave(out_image_file, clip(reduced_img))
Example #33
    def dictionary_learning(
        clean_data: np.ndarray,
        noisy_data: np.ndarray,
        ntiles: int,
        n_components: int = 5,
    ) -> np.ndarray:
        """
        Args:
        """
        from time import time

        t0 = time()
        # extract reference patches from clean data
        npix = clean_data.shape[0]
        patch_npix = int(clean_data.shape[0] / ntiles)
        data = extract_patches_2d(clean_data, (patch_npix, patch_npix))
        dt = time() - t0
        data = data.reshape(data.shape[0], -1)
        data -= np.mean(data, axis=0)
        data /= np.std(data, axis=0)
        # learn the dictionary from reference patches
        dico = MiniBatchDictionaryLearning(
            n_components=100,  # note: the function's n_components argument is not used here
            alpha=0.1,
            n_iter=500,
            # batch_size=3,
            # fit_algorithm='cd',
            # random_state=rng,
            # positive_dict=True,
            # positive_code=True,
        ).fit(data)
        dt = time() - t0
        components = dico.components_
        # extract reference patches from noisy data
        data = extract_patches_2d(noisy_data, (patch_npix, patch_npix))
        data = data.reshape(data.shape[0], -1)
        intercept = np.mean(data, axis=0)
        data -= intercept
        kwargs = {"transform_n_nonzero_coefs": 2}
        dico.set_params(transform_algorithm="omp", **kwargs)
        code = dico.transform(data)
        patches = np.dot(code, components)
        patches += intercept
        patches = patches.reshape(len(data), *(patch_npix, patch_npix))
        cleaned_img = reconstruct_from_patches_2d(patches, (npix, npix))
        dt = time() - t0
        return cleaned_img
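A minimal call sketch, assuming the function is reachable at module scope along with imports of extract_patches_2d, reconstruct_from_patches_2d and MiniBatchDictionaryLearning (in recent scikit-learn the n_iter argument is replaced by max_iter):

import numpy as np

clean = np.random.rand(128, 128)
noisy = clean + 0.1 * np.random.randn(128, 128)
denoised = dictionary_learning(clean, noisy, ntiles=16)  # 128 / 16 = 8-pixel patches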
Example #34
    def predict(self, img):
        img = (img.numpy()[0].transpose(1, 2, 0) * 255).astype(np.uint8)
        cv2.imshow('predict img in', img)
        cv2.waitKey(1)
        #img=img.numpy()[0]
        height, width, channels = img.shape
        img_patches = extract_patches_2d(
            img, (self.patch_size, self.patch_size)).astype(np.float64)
        img_patches = img_patches.reshape(img_patches.shape[0], -1)
        mean = np.mean(img_patches, axis=0)
        std = np.std(img_patches, axis=0)
        if self.normalize:
            img_patches -= mean
            img_patches /= std
        nearest_wds = self.model.kneighbors(img_patches, return_distance=True)
        knn_patches = np.array([])
        for x in xrange(img_patches.shape[0]):
            idxs = nearest_wds[1][x]

            #use averaging
            new_patch = self.patches[idxs].mean(axis=0)
            #use similarity
            if self.similarity:
                distances = nearest_wds[0][x]
                similarity = 1.0 / distances
                similarity /= similarity.sum()
                new_patch = (self.patches[idxs] *
                             similarity[:, np.newaxis]).sum(axis=0)
            #gaussian spread
            if self.gauss_blur:
                new_patch = np.multiply(new_patch, self.gkernel)

            if knn_patches.ndim == 1:
                knn_patches = np.zeros(
                    (img_patches.shape[0], new_patch.shape[0]))
            knn_patches[x] = new_patch
        knn_patches = knn_patches.reshape(
            knn_patches.shape[0], *(self.patch_size, self.patch_size, 6))
        reconstructed = reconstruct_from_patches_2d(knn_patches,
                                                    (height, width, 6))
        reconstructed_img = reconstructed[:, :, :3].astype(np.uint8)
        #cv2.imshow('recon',reconstructed_img)
        #cv2.waitKey(10000)
        reconstructed_mask = reconstructed[:, :, 3].astype(np.uint8)
        reconstructed_boundary = reconstructed[:, :, 4].astype(np.uint8)
        reconstructed_blend = reconstructed[:, :, 5].astype(np.uint8)
        return reconstructed_img, reconstructed_mask, reconstructed_boundary, reconstructed_blend
Example #35
def denoise():
    global D, patch_size, face, width, height, patches, data, ret, dico, code
    data = extract_patches_2d(face[:, width // 2:], patch_size)
    data = data.reshape(data.shape[0], -1)
    m = np.mean(data, axis=0)
    data -= m

    code = ompcode(D, data, 2).T
    patches = np.dot(code, D.T)
    patches += m
    patches = np.array(patches).reshape(len(data), *patch_size)
    ret = face.copy()
    ret[:, width // 2:] = reconstruct_from_patches_2d(patches, (height, width // 2))
    plt.figure()
    plt.imshow(ret, cmap=plt.cm.gray, interpolation='nearest')
    plt.show()
Example #36
def stainWSI(slide_name):
    slide_path = str(BASE_TRUTH_DIR / slide_name)
    patches_dir = str(BASE_TRUTH_DIR) + str(exp_folder_name)
    meta_dir = str(BASE_TRUTH_DIR) + "meta/"

    print('patches_dir', patches_dir)
    print('meta_dir', meta_dir)
    assure_path_exists(patches_dir)
    assure_path_exists(meta_dir)

    # read the image
    img = imread(slide_path, mode='RGB')
    print("original image dimensions", img.shape)
    # resize

    img = crop_center(img, 1376, 1376)

    print("resized image dimensions", img.shape)

    # matlabimg.imsave(str(meta_dir) + 'IMG-CROPPED.png', img)
    matlabimg.imsave(str(meta_dir) + 'IMG-CROPPED_1376_1376.png', img)

    # img_array = fromimage(img, flatten=True)
    # print("img_array  dimensions", img_array.shape)

    sys.exit()  # note: everything below is unreachable while this early exit remains
    # split the image into 32x32 patches
    patches = image.extract_patches_2d(img, (32, 32))
    print("patches dimensions", patches.shape)

    for i in range(len(patches)):
        patch_idx = "_" + "%05d" % (i)
        patch_name = str(patches_dir) + "image_0001" + patch_idx + ".png"
        io.imsave(patch_name, patches[i])

    # reconstruct from directory

    patch_dir = str(patches_dir) + "*.png"
    patches = np.array(io.imread_collection(patch_dir))
    reconstructed = (image.reconstruct_from_patches_2d(patches, img.shape))

    print("reconstructed image dimensions", reconstructed.shape)
    import scipy.misc
    scipy.misc.imsave(str(meta_dir) + "reconstructed.png", reconstructed)
    print(np.testing.assert_array_equal(img, reconstructed))
Example #37
File: ksvd.py, Project: permfl/comph
def reconstruct_patches(image_patches, image_size):
    """
        Reconstruct an image from image patches.
        Average the values from overlapping pixels

        Args
        ----
            image_patches: 2d ndarray shape (patch_size**2, n_patches)
            image_size: Original image size as a tuple. (height, width)

        Returns
        -------
            Reconstructed image

    """
    size = int(np.sqrt(image_patches.shape[0]))
    p = image_patches.T.reshape((image_patches.shape[1], size, size))
    return reconstruct_from_patches_2d(p, image_size)
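A round-trip check for the (patch_size**2, n_patches) column layout this helper expects:

import numpy as np
from sklearn.feature_extraction.image import extract_patches_2d

img = np.random.rand(20, 20)
patches = extract_patches_2d(img, (5, 5))          # (256, 5, 5)
columns = patches.reshape(patches.shape[0], -1).T  # (25, 256): one patch per column
np.testing.assert_array_almost_equal(reconstruct_patches(columns, img.shape), img)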
Example #38
def denoise_overlapped_strides(strides=(3, 3)):  #1 2 4 11

    #print '=== OVERLAPPING PATCHES',strides,'STRIDES ==============================='

    testdata = misc.imread(te_noisy_image, flatten=True)
    fname = te_noisy_image.rsplit('/', 1)[-1][:-4]
    #scipy.misc.imsave('outputs/LLnet_inference_'+fname+'_test.png',testdata)
    shutil.copyfile(te_noisy_image, 'outputs/ori_' + fname + '.png')

    test_set_x, te_h, te_w = load_data_overlapped_strides(
        te_dataset=te_noisy_image, patch_size=patch_size, strides=strides)
    im_ = test_set_x.get_value()
    im_noisy = im_.reshape((im_).shape[0], *patch_size)
    rec_n = im.reconstruct_from_patches_2d(im_noisy, (te_h, te_w))

    reconstructed = theano.function([],
                                    sda.logLayer.y_pred,
                                    givens={sda.x: test_set_x},
                                    on_unused_input='warn')
    result = reconstructed()

    im_recon = result.reshape((result).shape[0], *patch_size)
    rec_r = reconstruct_from_patches_with_strides_2d(im_recon, (te_h, te_w),
                                                     strides=strides)

    scipy.misc.imsave('outputs/LLnet_inference_' + fname + '_out.png', rec_r)

    #    print sda.sigmoid_layers[0].W.get_value().shape
    #    print sda.sigmoid_layers[1].W.get_value().shape
    #    print sda.sigmoid_layers[2].W.get_value().shape
    #    print sda.sigmoid_layers[3].W.get_value().shape
    #    print sda.sigmoid_layers[4].W.get_value().shape
    #    print sda.sigmoid_layers[5].W.get_value().shape
    #    print sda.sigmoid_layers[6].W.get_value().shape

    filters = sda.sigmoid_layers[0].W.get_value()
    print filters.shape
    image = PIL.Image.fromarray(
        tile_raster_images(X=filters.T,
                           img_shape=(17, 17),
                           tile_shape=(4, 20),
                           tile_spacing=(1, 1),
                           scale_rows_to_unit_interval=True))
    image.save('outputs/LLnet_filters.png')
Example #39
    def ksvd(self):
        """K-SVD denoising algorithm"""
        P = extract_patches_2d(self.Inoisy, self.ksvd_patch)
        patch_shape = P.shape
        P = P.reshape((patch_shape[0], -1))
        mean = np.mean(P, axis=1)[:, np.newaxis]
        P -= mean

        aksvd = ApproximateKSVD(n_components=self.ksvd_components)
        dico = aksvd.fit(P).components_
        reduced = (aksvd.transform(P)).dot(dico) + mean
        reduced_img = reconstruct_from_patches_2d(reduced.reshape(patch_shape),
                                                  self.shape)

        self.Iksvd = np.clip(reduced_img, 0, 1)
        self.Ilist[self.str2int['ksvd']] = self.Iksvd
        if self.verbose:
            print('K-SVD :', self.Iksvd)
        return ()
Example #40
    def reconstruct(self, new_patches, save=False):
        """
        Reconstruct the image with new_patches. Overlapping
        regions are averaged. The reconstructed patches are not saved by default

        self.patches are the same object before and after this method is called,
        as long as save=False

        :param new_patches:
            `ndarray` (patch_size, n_patches). Patches returned from Patches.patches

        :param save:
            Overwrite current patches with new_patches

        :return:
            Reconstructed image
        """
        if self.random is not None or self.max_patches is not None:
            raise ValueError('Cannot reconstruct when random or '
                             'max_patches is not None')

        if self.order != 'C':
            raise ValueError('Can only reconstruct C ordered patches')

        new_patches += self._mean

        if save:
            self._patches = new_patches

        if self.ndim == 2:
            p = new_patches.T.reshape(self._raw_patches_shape)
            reconstructed_image = reconstruct_from_patches_2d(
                patches=p, image_size=self._shape
            )
        elif self.ndim == 3:
            if self.rgb:
                raise NotImplementedError()
            else:
                return self._reconstruct_3d(new_patches)
        else:
            raise ValueError()

        return reconstructed_image
Example #41
def run(args):
    filename = args['<filename>']
    directoryname = args['<directoryname>']

    try:
        with open(directoryname + 'compression.matrix', 'r') as infile:
           compression_matrix = pickle.load(infile)
    except:
        print 'Compression matrix not found. Create the compression matrix with'
        print 'train_compression.py and pass the location as argument 2 to this script.'

    print 'Resizing image...'
    image = transform.resize(color.rgb2grey(image_io.imread(filename)), IMAGE_SIZE)

    print 'Saving resized, uncompressed image...'
    image_io.imsave('original.jpg', transform.resize(image, (600,800)))

    print 'Blocking image...'
    image_blocks = get_blocks(image)

    print 'Vectorizing blocks...'
    image_vects = [matrix(block_to_vect(image_blocks[i])) for i in range(0, len(image_blocks))]

    print 'Compressing block vectors...'
    compressed_vects = [compress_vect(image_vects[i], compression_matrix) for i in range(0, len(image_vects))]

    print 'Compression ratio = ' + str(float(len(compressed_vects[0][0]))/float(len(image_vects[0])))

    print 'Decompressing blocks...'
    decomp_blocks = array([recover_block(dot(compression_matrix, compressed_vects[i].transpose())) for i in range(0, len(compressed_vects))])

    print 'Saving image...'

    new_image = reconstruct_from_patches_2d(decomp_blocks, IMAGE_SIZE)

    new_image_big = transform.resize(new_image, (600,800))

    image_io.imsave('compressed.' + str(len(compressed_vects[0][0])) + '.jpg', new_image_big)
Example #42
 def reconstruct_spectrum(self, w=None, randomize=False):
     """reconstruct by fitting the current 2D dictionary to self.data
     inputs:
         w - per-component reconstruction weights [None=calculate weights]
         randomize - randomly permute components after getting weights [False]
     returns:
         self.X_hat - spectral reconstruction of self.data
     """
     data = self.data
     components = self.D.components_
     if w is None:
         self.w = self._get_approximation_coefs(data, components)
         w = self.w
     if randomize:
         components = np.random.permutation(components)
     recon = np.dot(w, components)
     if self.zscore:
         recon = recon * self.std
         recon = recon + self.mn
     recon = recon.reshape(-1, *self.patch_size)
     self.X_hat = reconstruct_from_patches_2d(recon, self.X.shape)
     if self.log_amplitude:
         self.X_hat = np.exp(self.X_hat) - 1.0 # invert log transform
Example #43
def get_dictionary_data(n_comp=20, zero_index=False):
    unlabeled = util.load_unlabeled_training(flatten=False)
    height, width = 32, 32
    n_images = 10000
    patch_size = (8, 8)

    unlabeled = util.standardize(unlabeled)
    np.random.shuffle(unlabeled)

    print('Extracting reference patches...')

    patches = np.empty((0, 64))
    t0 = time()

    for image in unlabeled[:n_images, :, :]:
        data = np.array(extract_patches_2d(image, patch_size, max_patches=0.01))
        data = data.reshape(data.shape[0], -1)
        data -= np.mean(data, axis=0)
        data /= np.std(data, axis=0) + 1e-20
        patches = np.concatenate([patches, data])

    print('done in %.2fs.' % (time() - t0))

    # whiten the patches
    z = zca.ZCA()
    z.fit(patches)
    z.transform(patches)

    print('Learning the dictionary...')
    t0 = time()
    dico = MiniBatchDictionaryLearning(n_components=n_comp, alpha=1)
    V = dico.fit(patches).components_
    dt = time() - t0
    print('done in %.2fs.' % dt)

    #plt.figure(figsize=(4.2, 4))
    #for i, comp in enumerate(V[:100]):
    #    plt.subplot(10, 10, i + 1)
    #    plt.imshow(comp.reshape(patch_size), cmap=plt.cm.gray_r,
    #               interpolation='nearest')
    #    plt.xticks(())
    #    plt.yticks(())
    #plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
    #plt.show()

    labeled_data, labels = util.load_labeled_training(flatten=False, zero_index=True)
    labeled_data = util.standardize(labeled_data)

    test_data = util.load_all_test(flatten=False)
    test_data = util.standardize(test_data)

    #util.render_matrix(test_data, flattened=False)

    print('Reconstructing the training images...')
    t0 = time()
    reconstructed_images = np.empty((0, 32, 32))

    for i, image in enumerate(labeled_data):
        data = extract_patches_2d(image, patch_size)
        data = data.reshape(data.shape[0], -1)
        data -= np.mean(data, axis=0)
        data /= np.std(data, axis=0) + 1e-20

        code = dico.transform(data)
        patches = np.dot(code, V)
        z.transform(patches)
        patches = patches.reshape(len(data), *patch_size)

        data = reconstruct_from_patches_2d(patches, (width, height))
        data = data.reshape(1, 32, 32)
        reconstructed_images = np.concatenate([reconstructed_images, data])

    print('done in %.2fs.' % (time() - t0))

    # flatten
    n, x, y = reconstructed_images.shape
    training_images = reconstructed_images.reshape(reconstructed_images.shape[0], reconstructed_images.shape[1]*reconstructed_images.shape[2])
    assert training_images.shape == (n, x*y)

    print('Reconstructing the test images...')
    t0 = time()
    reconstructed_test_images = np.empty((0, 32, 32))

    for image in test_data:
        data = extract_patches_2d(image, patch_size)
        data = data.reshape(data.shape[0], -1)
        data -= np.mean(data, axis=0)
        data /= np.std(data, axis=0) + 1e-20

        code = dico.transform(data)
        patches = np.dot(code, V)
        z.transform(patches)
        patches = patches.reshape(len(data), *patch_size)

        data = reconstruct_from_patches_2d(patches, (width, height))
        data = data.reshape(1, 32, 32)
        reconstructed_test_images = np.concatenate([reconstructed_test_images, data])

    print('done in %.2fs.' % (time() - t0))

    # flatten
    n, x, y = reconstructed_test_images.shape
    test_images = reconstructed_test_images.reshape(reconstructed_test_images.shape[0], reconstructed_test_images.shape[1]*reconstructed_test_images.shape[2])
    assert test_images.shape == (n, x*y)

    return (training_images, labels, test_images)
Example #44
File: ffoct.py, Project: gsidier/ffoct
				code_i = model.transform(data_i)
				code_i = csr_matrix(code_i)
				code.append(code_i)
			code = sparse.vstack(code)
	if (not restart) or steps[restart] <= steps['RECONSTRUCT']:
		with Timer("Reconstruct images ..."):
			# Reconstruct the input images from the projected patches.
			basis = fit.components_ 
			proj = code.dot(basis)
			proj *= std
			proj += mean
			proj = proj.reshape(len(proj), SAMP_WIDTH, SAMP_HEIGHT)
			approxs = [ ]
			errs = [ ]
			for (master, i1, i2) in zip(masters, idx[:-1], idx[1:]):
				approx = reconstruct_from_patches_2d(proj[i1:i2], master.size[::-1])
				approxs.append(approx)
				errs.append(approx - master)
	if (not restart) or steps[restart] <= steps['BUILD_DISTRIB']:
		with Timer("Build distrib ..."):
			# Each input patch is modelled as a mixture of two basis patches.
			# For each basis patch we build the distribution cond_distrib[i, j] = P(other patch = j | one patch is i).
			nz = code.nonzero()
			x = numpy.zeros((max(nz[0]) + 1, 2), dtype = int)
			x[:] = -1
			for i, j in zip(nz[0], nz[1]):
				if x[i, 0] == -1:
					x[i, 0] = j
				else:
					x[i, 1] = j
			for i, (a, b) in enumerate(x):
Example #45
    ('Least-angle regression\n5 atoms', 'lars',
     {'transform_n_nonzero_coefs': 5}),
    ('Thresholding\n alpha=0.1', 'threshold', {'transform_alpha': .1})]

reconstructions = {}
for title, transform_algorithm, kwargs in transform_algorithms:
    print(title + '...')
    reconstructions[title] = lena.copy()
    t0 = time()
    dico.set_params(transform_algorithm=transform_algorithm, **kwargs)
    code = dico.transform(data)
    patches = np.dot(code, V)

    if transform_algorithm == 'threshold':
        patches -= patches.min()
        patches /= patches.max()

    patches += intercept
    patches = patches.reshape(len(data), *patch_size)
    if transform_algorithm == 'threshold':
        patches -= patches.min()
        patches /= patches.max()
    reconstructions[title][:, height // 2:] = reconstruct_from_patches_2d(
        patches, (width, height // 2))
    dt = time() - t0
    print('done in %.2fs.' % dt)
    show_with_diff(reconstructions[title], lena,
                   title + ' (time: %.1fs)' % dt)

plt.show()
Example #46
def rf_reconstruct2(jobs, slice_infos, i):
    t0 = time()
    feats, labels = generate_training_feats(slice_infos, i)
    labels = np.array(labels)
    print(feats.shape, labels.shape)
    RF = train_rf_classifier(feats, labels, no_trees)

    dt1 = time() - t0   
    t0 = time()
    
    test_sl = slice_infos[i]
    image = test_sl.slice_im
    rimage = test_sl.slice_ro
    
    kernels = generate_kernels()
    
    dt2 = time() - t0
    t0 = time()
    
    # break the image into patches; all of these will be classified
    patch_size = (psize, psize)
    # _a stands for "all"
    patches_a = extract_patches_2d(image, patch_size)
    # _p stands for "predict"
    
    patches_r = extract_patches_2d(rimage, patch_size)
    # _r stands for "registered"
    
    dt3 = time() - t0
    t0 = time()
    
    # dump the RF
    #fn_rf = 'rf.joblib'
    #joblib.dump(RF, fn_rf)
    
    dt4 = time() - t0
    t0 = time()
    
    chunk_size = len(patches_a) / float(jobs)
    chunk_size = int(chunk_size / 8)
    
    # save the Random Forest classifier to disk
    fn_rf = "tmp/rf.pkl"
    fd_rf = open(fn_rf, 'wb')
    dill.dump(RF, fd_rf)
    fd_rf.close()
    
    
    # save the kernels to disk
    fn_kern = "tmp/kern.pkl" 
    fd_kern = open(fn_kern, 'wb')
    dill.dump(kernels, fd_kern)
    fd_kern.close()
    
    # list which will contain the filenames of the patch chunks
    fn_chunks = []
    
    # break both patch groups into chunk-sized sets and save each of them
    # to disk
    for j in range(0, len(patches_a), int(chunk_size)):
        # determine the bounds of this chunk
        a = j
        b = j + int(chunk_size)
        if b >= len(patches_a):
            b = len(patches_a) - 1
        
        # create a chunk
        patches_a_chunk = patches_a[a:b]
        patches_r_chunk = patches_r[a:b]
        
        # put it together
        chunk = [(a, b, len(patches_a)), patches_a_chunk, patches_r_chunk]

        # generate a filename for this chunk
        fn_chunk = "tmp/" + "patches_" + str(a) + "_" + str(b) + ".pkl"
        fn_chunks.append(fn_chunk)        
        
        # serialise it to disk
        fd_chunk = open(fn_chunk, 'wb')
        dill.dump(chunk, fd_chunk)
        fd_chunk.close()
        
    
    # check each patch
    if len(sys.argv) >= 2:
        #patches_p = Parallel(n_jobs=jobs)(delayed(classify_patch)(RF, kernels, patches_a, i) for i in range(len(patches_a)))
        #patches_p = Parallel(n_jobs=jobs)(delayed(classify_patch_w)(fn_rf, kernels, patches_a, i) for i in range(len(patches_a)))
        #patches_x = Parallel(n_jobs=jobs)(delayed(classify_patch_p)(fn_rf, kernels, patches_a, patches_r, i, i+int(chunk_size)) for i in range(0, len(patches_a), int(chunk_size)))    
        #patches_x = Parallel(n_jobs=jobs)(delayed(classify_patch_f)(rf_pkl, kernels_pkl, patches_a_pkl, patches_r_pkl, i, i+int(chunk_size)) for i in range(0, len(patches_a), int(chunk_size)))            
        patches_x =  Parallel(n_jobs=jobs)(delayed(classify_patch_group)(fn_rf, fn_kern, fn_chunk) for fn_chunk in fn_chunks)            
        patches_p = []        
        for group in patches_x:
            patches_p.extend(group)
    else:
        patches_p = []
        for i in range(len(patches_a)):
            patches_p.append(classify_patch_w(RF, kernels, patches_a, i))
        
            
    dt5 = time() - t0
    t0 = time()
    
    # reconstruct based on the patch
    recons_im = reconstruct_from_patches_2d(np.asarray(patches_p), image.shape)
    
    dt6 = time() - t0
    t0 = time()
    
    print("Completed Reconstruction {}/{}: {} DT: {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f}".format(i, len(slice_infos), recons, dt1, dt2, dt3, dt4, dt5, dt6))  
    
    # save reconstruction!
    with open(recons, 'wb') as f:
        dill.dump(recons_im, f)
Example #47
def combine_patches(patches, out_shape):
    '''Reconstruct an image from these `patches`'''
    patches = patches.transpose(0, 2, 3, 1)
    recon = reconstruct_from_patches_2d(patches, out_shape)
    return recon.transpose(2, 0, 1)
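A shape sketch for the channels-first convention combine_patches assumes (reconstruct_from_patches_2d is expected to be in scope, as above):

import numpy as np
from sklearn.feature_extraction.image import extract_patches_2d

img = np.random.rand(3, 32, 32)                         # channels-first image
p = extract_patches_2d(img.transpose(1, 2, 0), (8, 8))  # (625, 8, 8, 3)
recon = combine_patches(p.transpose(0, 3, 1, 2), (32, 32, 3))
print(recon.shape)                                      # (3, 32, 32)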
Example #48
# In[66]:

joblib.dump(mean_inf_data, "mean_sparse_patches.pkl", compress=3)


# In[10]:

reconstructed = mu[-1][np.newaxis, :] + inferred_data["s"][:, 0, :].dot(model_params["W"].T)
reconstructed_patches = np.reshape(reconstructed, (reconstructed.shape[0],) + patchsize)


# In[11]:

rec_data = reconstruct_from_patches_2d(
    reconstructed_patches, (reconstructed_patches.shape[0] + reconstructed_patches.shape[1] - 1, patchsize[1])
)
test_data = reconstruct_from_patches_2d(test_patches, (test_patches.shape[0] + test_patches.shape[1] - 1, patchsize[1]))
joblib.dump(
    {"original": test_data, "reconstructed": rec_data}, "results/reconstructed/{}.pkl".format(model_name), compress=3
)


# ## Validate Model
#
# ### Mean-squared error

# In[12]:

mse = mean_squared_error(test_data, rec_data)
print "Mean-squared error: {} \nshape = {}".format(mse, test_data.shape)
Example #49
     {'transform_n_nonzero_coefs': 2}),
    ('Least-angle regression\n5 atoms', 'lars',
     {'transform_n_nonzero_coefs': 5}),
    ('Thresholding\n alpha=0.1', 'threshold', {'transform_alpha': .1})]

reconstructions = {}
for title, transform_algorithm, kwargs in transform_algorithms:
    print(title + '...')
    reconstructions[title] = face.copy()
    t0 = time()
    dico.set_params(transform_algorithm=transform_algorithm, **kwargs)
    code = dico.transform(data)
    patches = np.dot(code, V)

    if transform_algorithm == 'threshold':
        patches -= patches.min()
        patches /= patches.max()

    patches += intercept
    patches = patches.reshape(len(data), *patch_size)
    if transform_algorithm == 'threshold':
        patches -= patches.min()
        patches /= patches.max()
    reconstructions[title][:, width // 2:] = reconstruct_from_patches_2d(
        patches, (height, width // 2))
    dt = time() - t0
    print('done in %.2fs.' % dt)
    show_with_diff(reconstructions[title], face,
                   title + ' (time: %.1fs)' % dt)

plt.show()
Example #50
def combine_patches(in_patches, out_shape, scale):
    '''Reconstruct an image from these `patches`'''
    recon = reconstruct_from_patches_2d(in_patches, out_shape)
    return recon
Example #51
def imageDenoisingTest01():
	from time import time
	import matplotlib.pyplot as plt
	import numpy as np

	from scipy.misc import lena

	from sklearn.decomposition import MiniBatchDictionaryLearning
	from sklearn.feature_extraction.image import extract_patches_2d
	from sklearn.feature_extraction.image import reconstruct_from_patches_2d

	#Load image and extract patches
	lena = lena() / 256.0




	lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
	lena /= 4.0

	height, width = lena.shape

	#Distort the right half of the image
	print "distorting image"

	distorted = lena.copy()
	distorted[:, height//2:] += 0.075 * np.random.randn(width, height // 2)

	#plt.imshow(distorted[:, :height//2], cmap = plt.cm.gray, interpolation = "nearest")
	#plt.show()

	print "Extacting reference patches"
	#这里是从distorted的左半边抽取patches
	t0 = time()
	patch_size = (7, 7)
	data = extract_patches_2d(distorted[:, :height//2], patch_size)

	# data is a 30500 x 7 x 7 array
	#print data
	#print len(data)
	#print len(data[0][0])

	#plt.imshow(data[0], cmap = plt.cm.gray, interpolation = "nearest")
	#plt.show()

	#print distorted[:, height//2:].shape  # the right half is 256 x 128




	# flatten each patch into a 1-D vector, then normalize
	data = data.reshape(data.shape[0], -1)
	data -= np.mean(data, axis = 0)
	data /= np.std(data, axis = 0)

	print 'done in ' + str(time() - t0)


	# Learn the dictionary from reference patches
	print "Learning the dictionary"
	t0 = time()
	# learn the dictionary from the patches
	# instantiate the model
	dico = MiniBatchDictionaryLearning(n_components = 100, alpha = 1, n_iter = 5000)

	print data.shape  # data is a 30500 x 49 matrix
	V = dico.fit(data).components_

	print V.shape  # V is a 100 x 49 matrix
	dt = time() - t0

	print "done in %.2fs." % dt

	plt.figure(figsize = (4.2, 4))
	for i, comp in enumerate(V[:100]):
		plt.subplot(10, 10, i + 1)
		plt.imshow(comp.reshape(patch_size), cmap = plt.cm.gray_r, interpolation = "nearest")
		plt.xticks(())
		plt.yticks(())

	plt.suptitle("Dictionary learned from lena patches\n" + "Train time %.1fs on %d patches" % (dt, len(data)), fontsize = 16)

	plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)

	def show_with_diff(image, reference, title):
		plt.figure(figsize = (5, 3.3))
		plt.subplot(1, 2, 1)
		plt.title('Image')
		plt.imshow(image, vmin = 0, vmax = 1, cmap = plt.cm.gray, interpolation = "nearest")

		plt.xticks(())
		plt.yticks(())
		plt.subplot(1,2,2)

		difference = image - reference

		plt.title("difference (norm: %.2f)" % np.sqrt(np.sum(difference ** 2)))

		plt.imshow(difference, vmin = -0.5, vmax = 0.5, cmap = plt.cm.PuOr, interpolation = "nearest")
		plt.xticks(())
		plt.yticks(())
		plt.suptitle(title, size = 16)

		plt.subplots_adjust(0.02, 0.02, 0.98, 0.79, 0.02, 0.02)


	show_with_diff(distorted, lena, "Distorted Image")




	#plt.show()

	#Extract noisy patches and reconstruct them using the dictionary
	# extract patches from the right half
	print('Extracting noisy patches...')
	t0 = time()
	data = extract_patches_2d(distorted[:, height//2:], patch_size)
	data = data.reshape(data.shape[0], -1)
	intercept = np.mean(data, axis = 0)
	data -= intercept

	print "done in %.2fs. " % (time() - t0)

	transform_algorithms = [('Orthogonal Matching Pursuit\n1 atom', 'omp',
							{'transform_n_nonzero_coefs': 1}),
							('Orthogonal Matching Pursuit\n2 atoms', 'omp',
							{'transform_n_nonzero_coefs': 2}),
							('Least-angle regression\n5 atoms', 'lars',
							{'transform_n_nonzero_coefs': 5}),
							('Thresholding\n alpha = 0.1', 'threshold',
							{'transform_alpha': 0.1})]

	reconstructions = {}
	for title, transform_algorithm, kwargs in transform_algorithms:
		print title + "..."
		reconstructions[title] = lena.copy()
		t0 = time()
		dico.set_params(transform_algorithm = transform_algorithm, **kwargs)
		code = dico.transform(data)  # use the trained model to obtain the representation coefficients (the code)
		patches = np.dot(code, V)

		if transform_algorithm == "threshold":
			patches -= patches.min()
			patches /= patches.max()

		patches += intercept
		patches = patches.reshape(len(data), *patch_size)

		if transform_algorithm == "threshold":
			patches -= patches.min()
			patches /= patches.max()

		reconstructions[title][:, height // 2:] = reconstruct_from_patches_2d(patches, (width, height // 2))
		dt = time() - t0
		print "done in %.2fs." % dt
		show_with_diff(reconstructions[title], lena, title + '(time: %.1fs)' % dt)

	plt.show()
Example #52
plt.figure(figsize=(12,4))
plt.subplot(1,3,1)
plt.imshow(test_patches[i].T,origin='lower',aspect='auto',interpolation='nearest',vmin=imin,vmax=imax)
plt.subplot(1,3,2)
plt.imshow(reconstructed_patches[i].T,origin='lower',aspect='auto',interpolation='nearest',vmin=imin,vmax=imax)
plt.colorbar()
plt.subplot(1,3,3)
plt.imshow((test_patches[i]-reconstructed_patches[i]).T,origin='lower',aspect='auto',interpolation='nearest')
plt.colorbar()


# In[24]:

from sklearn.feature_extraction.image import reconstruct_from_patches_2d

stim_tl = reconstruct_from_patches_2d(np.exp(test_patches),(500,48))
rec_tl = reconstruct_from_patches_2d(np.exp(reconstructed_patches),(500,48))


# In[25]:

from sklearn.metrics import mean_squared_error

length = 450

plt.figure(figsize=(12,6))
plt.subplot(3,1,1)
plt.imshow(stim_tl[:length].T,origin='lower',aspect='auto',interpolation='nearest')
plt.colorbar()
plt.subplot(3,1,2)
plt.imshow(rec_tl[:length].T,origin='lower',aspect='auto',interpolation='nearest')