Example #1
import numpy as np


# The original example starts mid-function; the function name below is assumed.
# The body checks whether the most active classifier output (dots) matches the
# label of the test image being presented at time t (presentation_time is
# defined elsewhere in the original script).
def classifier_correct(t, dots):
    i = int(t / presentation_time)
    j = np.argmax(dots)
    return test_labels[i] == labels[j]


# --- load the RBM data
# data = np.load('nlif-deep-orig.npz')
data = np.load('nlif-deep.npz')
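# weights/biases hold the per-layer parameters of the trained network; Wc/bc
# appear to be the weights and biases of the linear classifier on the top layer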
weights = data['weights']
biases = data['biases']
Wc = data['Wc']
bc = data['bc']

# --- load the testing data
from autoencoder import mnist
_, _, [test_images, test_labels] = mnist()

for images in [test_images]:
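    # standardize each pixel: subtract the per-pixel mean and divide by the
    # per-pixel std, floored at 0.3 so near-constant pixels are not amplified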
    images -= images.mean(axis=0, keepdims=True)
    images /= np.maximum(images.std(axis=0, keepdims=True), 3e-1)

# shuffle
rng = np.random.RandomState(92)
inds = rng.permutation(len(test_images))
test_images = test_images[inds]
test_labels = test_labels[inds]

labels = np.unique(test_labels)
n_labels = labels.size

# --- test as ANN
Example #2
import numpy as np
import theano
import theano.tensor as tt

from autoencoder import mnist


# Soft-LIF rate function: a smoothed LIF tuning curve used as a differentiable
# nonlinearity for backprop. The original example starts mid-function; the
# signature is reconstructed (the name `nlif` is taken from the `funcs` list below).
def nlif(x):
    dtype = theano.config.floatX
    sigma = tt.cast(0.05, dtype=dtype)
    tau_ref = tt.cast(0.002, dtype=dtype)
    tau_rc = tt.cast(0.02, dtype=dtype)
    alpha = tt.cast(1, dtype=dtype)
    beta = tt.cast(1, dtype=dtype)  # so that f(0) = firing threshold
    amp = tt.cast(1. / 63.04, dtype=dtype)  # so that f(1) = 1

    j = alpha * x + beta - 1
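    # smooth the rectification at the firing threshold with a softplus of
    # width sigma, then apply the LIF rate equation scaled by amp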
    j = sigma * tt.log1p(tt.exp(j / sigma))
    v = amp / (tau_ref + tau_rc * tt.log1p(1. / j))
    return tt.switch(j > 0, v, 0.0)


# --- load the data
train, valid, test = mnist()
train_images, _ = train
valid_images, _ = valid
test_images, _ = test

for images in [train_images, valid_images, test_images]:
    images -= images.mean(axis=0, keepdims=True)
    images /= np.maximum(images.std(axis=0, keepdims=True), 3e-1)

# --- pretrain with SGD backprop
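# shapes: input image shape followed by hidden-layer sizes; funcs: nonlinearity
# per layer (None = no nonlinearity on that layer); rf_shapes and rates give,
# per hidden layer, the local receptive-field shape (None presumably meaning
# fully connected) and what appears to be a per-layer learning rate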
shapes = [(28, 28), 500, 200]
funcs = [None, nlif, nlif]
rf_shapes = [(9, 9), None]
rates = [1., 1.]

n_layers = len(shapes) - 1
Example #3
import os  # only needed if the THEANO_FLAGS lines below are uncommented
import numpy as np
import matplotlib.pyplot as plt

# os.environ['THEANO_FLAGS'] = 'device=gpu, floatX=float32'
# os.environ['THEANO_FLAGS'] = 'mode=DEBUG_MODE'
import theano
import theano.tensor as tt

import plotting

import autoencoder
reload(autoencoder)  # Python 2 builtin; on Python 3 use importlib.reload
from autoencoder import (rms, mnist, show_recons,
                         FileObject, Autoencoder, DeepAutoencoder)

plt.ion()

# --- load the data
train, valid, test = mnist()
train_images, _ = train
valid_images, _ = valid
test_images, _ = test

for images in [train_images, valid_images, test_images]:
    images -= images.mean(axis=0, keepdims=True)
    images /= np.maximum(images.std(axis=0, keepdims=True), 3e-1)

# --- pretrain with SGD backprop
# shapes = [(28, 28), 200, 50]
# linear = [True, False, True]
# rf_shapes = [(9, 9), None]
# rates = [1., 0.3]
sigmoid = tt.nnet.sigmoid
shapes = [(28, 28), 500, 200, 50]
Example #4
import numpy as np


# Same classifier check as in Example #1; the source is truncated to the final
# return line, so the signature and first two lines are reconstructed (the
# function name is assumed).
def classifier_correct(t, dots):
    i = int(t / presentation_time)
    j = np.argmax(dots)
    return test_labels[i] == labels[j]


# --- load the RBM data
# data = np.load('nlif-deep-orig.npz')
# data = np.load('lif-500-200-10.npz')
data = np.load("lif-126-error.npz")
weights = data["weights"]
biases = data["biases"]
Wc = data["Wc"]
bc = data["bc"]

# --- load the testing data
from autoencoder import mnist

_, _, [test_images, test_labels] = mnist()

for images in [test_images]:
    images -= images.mean(axis=0, keepdims=True)
    images /= np.maximum(images.std(axis=0, keepdims=True), 3e-1)

# shuffle
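# (fixed RNG seed so the shuffled test order is reproducible between runs)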
rng = np.random.RandomState(92)
inds = rng.permutation(len(test_images))
test_images = test_images[inds]
test_labels = test_labels[inds]

labels = np.unique(test_labels)
n_labels = labels.size

# --- test as ANN