Example #1
import numpy as np
import matplotlib.pyplot as plt   # ae, dp, load_images, sample_images are project-local

I = load_images(mat_file)   # load raw images from the .mat file
n = 10000
X = sample_images(I, n=n)   # sample n patches for training

d = X.shape[0] # input dimension

print('Sparse autoencoder applied to textured data\n')

print('Data:')
print('------')
print('Number of samples for training:', n, '\n')

# define the neural network parameters and optimization criteria
nnet_params = {'n_hid': 25, 'decay': 0.0001, 'beta': 3, 'rho': 0.01}
optim_params = {'method': 'L-BFGS-B', 'n_iter': 400}
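# 'rho' is the target mean activation of each hidden unit, 'beta' weights the
# sparsity penalty (beta * sum_j KL(rho || rho_hat_j) in the standard
# sparse-autoencoder cost), and 'decay' is the L2 weight-decay coefficient.
# These meanings are inferred from the names; the exact cost is defined in ae.Autoencoder.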

# print out to console
dp.pretty_print('Autoencoder parameters', nnet_params)
dp.pretty_print('Optimization parameters', optim_params)

# apply the model
sae = ae.Autoencoder(d=d, **nnet_params)
sae.fit(X, **optim_params)
X_r = sae.transform(X, 'reconstruct')    # reconstructed inputs
X_max = sae.compute_max_activations()    # per-hidden-unit maximally activating inputs

np.savez('image_bases', X_max=X_max)   # save the learned bases to disk...
files = np.load('image_bases.npz')     # ...then reload and visualize them
X_max = files['X_max']
visualize_image_bases(X_max, 25)
plt.show()
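visualize_image_bases is project-local and not shown with this example (the 14x14 variant defined in Example #2 below is hard-coded for 196 units). A minimal sketch consistent with the call above, with an assumed 8x8 patch size, could be:

import numpy as np
import matplotlib.pyplot as plt

def visualize_image_bases(X_max, n_hid, w=8, h=8):
    # each column of X_max is one basis; w = h = 8 is an assumed patch size
    side = int(np.ceil(np.sqrt(n_hid)))
    plt.figure()
    for i in range(n_hid):
        plt.subplot(side, side, i + 1)   # subplot indices are 1-based
        patch = X_max[:, i].reshape(w, h)
        plt.imshow(patch / np.abs(patch).max(), cmap='gray', interpolation='none')
        plt.axis('off')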
Example #2
_, row, col = train_img.shape
d = row * col   # input dimension (flattened image)
X_tr = np.reshape(train_img[:num_img], (num_img, d)).T / 255.   # train data matrix: one sample per column, scaled to [0, 1]

print('Sparse Autoencoder applied to MNIST data\n')

print('Data:')
print('------')
print('Number of samples for training:', num_img, '\n')

# neural network and optimization parameters
nnet_params = {'d': d, 'n_hid': 196, 'decay': 0.003, 'beta': 3, 'rho': 0.1}
optim_params = {'method': 'L-BFGS-B', 'n_iter': 400}

dp.pretty_print('Neural Network parameters', nnet_params)
dp.pretty_print('Optimization parameters', optim_params)

neural_net = ae.Autoencoder(**nnet_params)
neural_net.fit(X_tr, **optim_params)

X_max = neural_net.compute_max_activations()

def visualize_image_bases(X_max, n_hid, w=28, h=28):
    plt.figure()
    for i in range(n_hid):
        plt.subplot(14, 14, i + 1)   # subplot indices are 1-based
        curr_img = X_max[:, i].reshape(w, h)
        curr_img = curr_img / np.max(curr_img)   # normalize without mutating X_max in place
        plt.imshow(curr_img, cmap='gray', interpolation='none')
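compute_max_activations is likewise project-local. For a single hidden layer with weights W of shape (n_hid, d), the unit-norm input that maximally activates hidden unit j is its normalized weight row, so a minimal sketch under that standard assumption is:

import numpy as np

def compute_max_activations(W):
    # W: (n_hid, d) first-layer weights; the unit-norm maximizer of W[j] @ x
    # is W[j] / ||W[j]||; returns one column per hidden unit, shape (d, n_hid)
    return (W / np.linalg.norm(W, axis=1, keepdims=True)).T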
Example #3
# set the targets for the test set (one-hot encoding)
y_te = np.zeros((k, m_te))
for i, idx in enumerate(test_lbl):
    y_te[idx, i] = 1
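# equivalently, vectorized: y_te = np.eye(k)[test_lbl].T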

# define the network architecture and the initial parameters for pre-training
# and fine-tuning
nnet_params = {
    "d": d,
    "k": k,
    "n_hid": [200, 200],
    "rho": [0.1, 0.1],
    "beta": [3, 3],
    "sae_decay": [0.003, 0.003],
    "scl_decay": 0.0001,
}
dp.pretty_print("Deep Autoencoder Neural Network", nnet_params)
pretrain_params = {"method": "L-BFGS-B", "n_iter": 400}
dp.pretty_print("Pre-training parameters", pretrain_params)
finetune_params = {"method": "L-BFGS-B", "n_iter": 400}
dp.pretty_print("Fine-tuning parameters", finetune_params)

nnet = dac.DeepAutoencoderClassifier(**nnet_params)  # define the deep net
nnet.pre_train(X_tr, **pretrain_params)  # perform pre-training
nnet.fit(X_tr, y_tr, **finetune_params)  # fit the model
pred, mce_te = nnet.predict(X_te, y_te)  # predict

print "Performance:"
print "------------"
print "Accuracy:", 100.0 * (1 - mce_te), "%"