# ==== Example 1 ====
from functools import partial

import scipy.optimize

import sample_images
import sparse_autoencoder
from test_numerical_gradient import diff_grad, check_grad

# Module-level configuration shared by the numerical-gradient tests below.

# Each sampled patch is 8x8 pixels; the visible layer has one unit per pixel.
patch_size = (8, 8)
visible_size = patch_size[0] * patch_size[1]
# hidden_size = 25
hidden_size = 3  # tiny hidden layer keeps the numerical gradient check fast
# L2 weight penalty, target mean activation, and sparsity penalty weight.
weight_decay, sparsity_param, beta = 0.0001, 0.01, 3
# weight_decay, sparsity_param, beta = 0, 0.01, 0

# num_samples = 10
num_samples = 100
images = sample_images.load_matlab_images("../data/IMAGES.mat")
patches = sample_images.sample(images, num_samples, patch_size)

# Pre-bind network geometry and data; weight_decay and beta are deliberately
# left unbound — presumably so individual tests can vary them per call.
# NOTE(review): `partial` is functools.partial; make sure it is imported.
base_sae_cost = partial(
    sparse_autoencoder.cost,
    visible_size=visible_size,
    hidden_size=hidden_size,
    sparsity_param=sparsity_param,
    data=patches,
)


def test_sae_cost():
    threshold = 1e-9 * (num_samples / 50.0)
    theta = sparse_autoencoder.initialize_params(hidden_size, visible_size)
# ==== Example 2 ====
#!/usr/bin/env python
import functools

import numpy as np

import pca
import sample_images
import display_network

if __name__ == '__main__':
    # Sample raw (un-normalized) image patches, center them, run PCA, and
    # write diagnostic images. Fix: `num_samples` was assigned the same
    # value twice in a row; the duplicate assignment is removed.
    num_samples = 10000

    m = sample_images.load_matlab_images('IMAGES_RAW.mat')
    patches = sample_images.sample(m, num_samples, size=(12,12), norm=None)

    # Save the raw patches for visual inspection.
    display_network.display_network('raw-patches.png', patches)

    # ensure that patches have zero mean (per-feature centering before PCA)
    mean = np.mean(patches, axis=0)
    patches -= mean
    assert np.allclose(np.mean(patches, axis=0), np.zeros(patches.shape[1]))

    # U, s, x_rot: presumably principal directions, singular values, and the
    # data rotated into the PCA basis — confirm against pca.pca's contract.
    U, s, x_rot = pca.pca(patches)

    # Covariance of the rotated data, saved as an image for inspection.
    covar = pca.covariance(x_rot)
    display_network.array_to_file('covariance.png', covar)

    # percentage of variance
    # cumulative sum
    pov = np.array(functools.reduce(
if __name__ == '__main__':
    # Train a sparse autoencoder on natural-image patches with L-BFGS.
    # NOTE(review): this fragment references `partial`, `sparse_autoencoder`
    # and (further down) `scipy` without imports visible here — the original
    # script presumably imported them at the top; verify before running.

    # Network Architecture 
    patch_size = (8,8)
    visible_size = patch_size[0] * patch_size[1]  # one visible unit per pixel
    hidden_size = 25
    #hidden_size = 3

    # Training params
    weight_decay, sparsity_param, beta = 0.0001, 0.01, 3
    #weight_decay, sparsity_param, beta = 0, 0.01, 0
    max_iter = 400	        # Maximum number of iterations of L-BFGS to run 

    # Get the samples
    num_samples = 10000
    #num_samples = 10
    images = sample_images.load_matlab_images('IMAGES.mat')
    patches = sample_images.sample(images, num_samples, patch_size)

    # set up L-BFGS args
    theta = sparse_autoencoder.initialize_params(hidden_size, visible_size)
    # Bind every fixed argument so the optimizer sees a function of theta only.
    sae_cost = partial(sparse_autoencoder.cost,
                        visible_size=visible_size, 
                        hidden_size=hidden_size,
                        weight_decay = weight_decay,
                        beta=beta,
                        sparsity_param=sparsity_param,
                        data=patches)

    # Train!
    trained, cost, d = scipy.optimize.lbfgsb.fmin_l_bfgs_b(sae_cost, theta, 
                                                           maxfun=max_iter, 
# ==== Example 4 ====
#!/usr/bin/env python
import functools

import numpy as np

import pca
import sample_images
import display_network

if __name__ == '__main__':
    # Sample raw (un-normalized) image patches, center them, run PCA, and
    # write diagnostic images. Fix: `num_samples` was assigned the same
    # value twice in a row; the duplicate assignment is removed.
    num_samples = 10000

    m = sample_images.load_matlab_images('IMAGES_RAW.mat')
    patches = sample_images.sample(m, num_samples, size=(12, 12), norm=None)

    # Save the raw patches for visual inspection.
    display_network.display_network('raw-patches.png', patches)

    # ensure that patches have zero mean (per-feature centering before PCA)
    mean = np.mean(patches, axis=0)
    patches -= mean
    assert np.allclose(np.mean(patches, axis=0), np.zeros(patches.shape[1]))

    # U, s, x_rot: presumably principal directions, singular values, and the
    # data rotated into the PCA basis — confirm against pca.pca's contract.
    U, s, x_rot = pca.pca(patches)

    # Covariance of the rotated data, saved as an image for inspection.
    covar = pca.covariance(x_rot)
    display_network.array_to_file('covariance.png', covar)

    # percentage of variance
    # cumulative sum
    pov = np.array(
if __name__ == '__main__':
    # Train a sparse autoencoder on natural-image patches with L-BFGS.
    # NOTE(review): this fragment references `partial`, `sparse_autoencoder`
    # and (further down) `scipy` without imports visible here — the original
    # script presumably imported them at the top; verify before running.

    # Network Architecture
    patch_size = (8, 8)
    visible_size = patch_size[0] * patch_size[1]  # one visible unit per pixel
    hidden_size = 25
    #hidden_size = 3

    # Training params
    weight_decay, sparsity_param, beta = 0.0001, 0.01, 3
    #weight_decay, sparsity_param, beta = 0, 0.01, 0
    max_iter = 400  # Maximum number of iterations of L-BFGS to run

    # Get the samples
    num_samples = 10000
    #num_samples = 10
    images = sample_images.load_matlab_images('IMAGES.mat')
    patches = sample_images.sample(images, num_samples, patch_size)

    # set up L-BFGS args
    theta = sparse_autoencoder.initialize_params(hidden_size, visible_size)
    # Bind every fixed argument so the optimizer sees a function of theta only.
    sae_cost = partial(sparse_autoencoder.cost,
                       visible_size=visible_size,
                       hidden_size=hidden_size,
                       weight_decay=weight_decay,
                       beta=beta,
                       sparsity_param=sparsity_param,
                       data=patches)

    # Train!
    trained, cost, d = scipy.optimize.lbfgsb.fmin_l_bfgs_b(sae_cost,
                                                           theta,