tensorflow_SF.py — 136 lines (98 loc) · 3.45 KB
import os
import time
import numpy as np
import tensorflow as tf
from scipy.io import loadmat
import utilities.visualize as visualize
def norm(f):
    """
    Perform sparse filtering normalization procedure.

    Rows are L2-normalized first, then columns (the two-step scheme of
    Ngiam et al.'s sparse filtering).

    Parameters:
    ----------
    f : Tensor
        The activation of the network. [neurons x examples]

    Returns:
    -------
    f_hat : Tensor
        The row and column normalized matrix of activation.
    """
    fs = tf.sqrt(tf.square(f) + 1e-8)  # soft-absolute: smooth, differentiable |f|
    # l2 norm of each row, kept as a [neurons x 1] column so plain division
    # broadcasts — replaces the original tf.tile with hard-coded [1, 50000]
    # / [100, 1] shapes, so any activation size now works
    l2fs = tf.sqrt(tf.reduce_sum(tf.square(fs), reduction_indices=1, keep_dims=True))
    nfs = fs / l2fs  # normalize rows
    # l2 norm of each column, kept as a [1 x examples] row for broadcasting
    l2fn = tf.sqrt(tf.reduce_sum(tf.square(nfs), reduction_indices=0, keep_dims=True))
    f_hat = nfs / l2fn  # normalize columns
    return f_hat
class SparseFilter(object):
    """ Sparse Filtering """

    def __init__(self, w, x):
        """
        Build a sparse filtering model.

        Parameters:
        ----------
        w : ndarray
            Weight matrix randomly initialized.
        x : ndarray (symbolic Theano variable)
            Data for model.
        """
        self.w = w        # weight matrix [filters x input dim]
        self.x = x        # training data [examples x input dim]
        self.norm = norm  # normalization procedure (module-level helper)

    def dot(self):
        """ Returns dot product of weights and input data """
        return tf.matmul(self.w, self.x.T)

    def feed_forward(self):
        """ Performs sparse filtering normalization procedure """
        return self.norm(self.dot())
def main():
    """Train a sparse filtering network on image patches and plot results.

    Loads 'data/patches.mat' (relative to this file), mean-centers it,
    trains a single-layer sparse filter with gradient descent, then
    visualizes the cost curve and the learned receptive fields.
    """
    # define global parameters
    filename = 'patches.mat'
    n_filters = 100
    learn_rate = 0.001
    iterations = [200]

    # load in data and preprocess: subtract the per-feature mean, then
    # transpose to [examples x dimensions] and cast to float32 for TF
    print("loading data...")
    base_path = os.path.dirname(__file__)
    file_path = os.path.join(base_path, "data", filename)
    data = loadmat(file_path)['X']
    data -= data.mean(axis=0)
    data = np.float32(data.T)

    # construct the network
    print("building model...")
    weights = tf.Variable(tf.random_uniform([n_filters, data.shape[1]]))
    model = SparseFilter(weights, data)

    # define loss, optimizer, and train function; sparse filtering
    # minimizes the summed (normalized, soft-absolute) activations
    loss = tf.reduce_sum(model.feed_forward())
    optimizer = tf.train.GradientDescentOptimizer(learn_rate)
    train = optimizer.minimize(loss)

    # initialize all the variables
    init = tf.initialize_all_variables()

    # run the session inside a context manager so it is always closed,
    # even if training raises (the original leaked the session)
    with tf.Session() as sess:
        sess.run(init)

        # train the sparse filtering network
        print("training network...")
        t = time.time()
        cost_running = []

        # iterate over training epochs
        for epoch in xrange(iterations[0]):
            sess.run(train)
            current_cost = sess.run(loss)
            cost_running.append(current_cost)
            print("Cost at epoch %i: %0.4f" % (epoch, current_cost))

        # calculate and display elapsed training time
        elapsed = time.time() - t
        print('Elapsed training time: %f' % elapsed)

        # plot the cost function over time
        c = {'layer0': cost_running}
        visualize.plotCost(c)

        # visualize the receptive fields of the first layer
        weights_final = sess.run(weights)
        print(weights_final.shape)
        visualize.drawplots(weights_final.T, color='gray', convolution='n',
                            pad=0, examples=None, channels=1)
# run training only when executed as a script, not when imported
if __name__ == '__main__':
    main()