"""Convolutional network example.
Run the training for 50 epochs with
```
python main.py --num-epochs 50
```
It is going to reach around 0.8% error rate on the test set.
"""
import logging
import numpy
from argparse import ArgumentParser
from theano import tensor
from blocks.algorithms import GradientDescent, Scale
from blocks.bricks import (MLP, Rectifier, Initializable, FeedforwardSequence,
Softmax)
from blocks.bricks.interfaces import Activation
from blocks.bricks.conv import (Convolutional, ConvolutionalSequence,
Flattener, MaxPooling)
from blocks.bricks.cost import CategoricalCrossEntropy, MisclassificationRate
from blocks.extensions import FinishAfter, Timing, Printing, ProgressBar
from blocks.extensions.monitoring import (DataStreamMonitoring,
TrainingDataMonitoring)
# A local checkpointing extension is used in place of
# blocks.extensions.saveload.Checkpoint:
from checkpoint import CheckpointBlock
from blocks.graph import ComputationGraph
from blocks.initialization import Constant, Uniform
from blocks.main_loop import MainLoop
from blocks.model import Model
from blocks.monitoring import aggregation
from fuel.datasets import MNIST
from fuel.schemes import ShuffledScheme
from fuel.streams import DataStream
from toolz.itertoolz import interleave


class LeNet(FeedforwardSequence, Initializable):
"""LeNet-like convolutional network.
The class implements LeNet, which is a convolutional sequence with
an MLP on top (several fully-connected layers). For details see
[LeCun95]_.
.. [LeCun95] LeCun, Yann, et al.
*Comparison of learning algorithms for handwritten digit
recognition.*,
International conference on artificial neural networks. Vol. 60.
Parameters
----------
conv_activations : list of :class:`.Brick`
Activations for convolutional network.
num_channels : int
Number of channels in the input image.
image_shape : tuple
Input image shape.
filter_sizes : list of tuples
Filter sizes of :class:`.blocks.conv.ConvolutionalLayer`.
feature_maps : list
Number of filters for each of convolutions.
pooling_sizes : list of tuples
Sizes of max pooling for each convolutional layer.
top_mlp_activations : list of :class:`.blocks.bricks.Activation`
List of activations for the top MLP.
top_mlp_dims : list
Numbers of hidden units and the output dimension of the top MLP.
conv_step : tuples
Step of convolution (similar for all layers).
border_mode : str
Border mode of convolution (similar for all layers).
"""
def __init__(self, conv_activations, num_channels, image_shape,
filter_sizes, feature_maps, pooling_sizes,
top_mlp_activations, top_mlp_dims,
conv_step=None, border_mode='valid', **kwargs):
if conv_step is None:
self.conv_step = (1, 1)
else:
self.conv_step = conv_step
self.num_channels = num_channels
self.image_shape = image_shape
self.top_mlp_activations = top_mlp_activations
self.top_mlp_dims = top_mlp_dims
self.border_mode = border_mode
        conv_parameters = zip(filter_sizes, feature_maps)

        # Construct the convolutional layers: each convolution is directly
        # followed by its activation and a max pooling layer, as the
        # docstring describes.
        self.layers = list(interleave([
            (Convolutional(filter_size=filter_size,
                           num_filters=num_filter,
                           step=self.conv_step,
                           name='conv_{}'.format(i))
             for i, (filter_size, num_filter)
             in enumerate(conv_parameters)),
            conv_activations,
            (MaxPooling(size, name='pool_{}'.format(i))
             for i, size in enumerate(pooling_sizes))]))
self.conv_sequence = ConvolutionalSequence(self.layers, num_channels,
border_mode=self.border_mode,
image_size=image_shape)
# Construct a top MLP
self.top_mlp = MLP(top_mlp_activations, top_mlp_dims)
# We need to flatten the output of the last convolutional layer.
# This brick accepts a tensor of dimension (batch_size, ...) and
# returns a matrix (batch_size, features)
self.flattener = Flattener()
application_methods = [self.conv_sequence.apply, self.flattener.apply,
self.top_mlp.apply]
super(LeNet, self).__init__(application_methods, **kwargs)

    @property
def output_dim(self):
return self.top_mlp_dims[-1]

    @output_dim.setter
def output_dim(self, value):
self.top_mlp_dims[-1] = value

    def _push_allocation_config(self):
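        # Let the convolutional sequence compute its output dimensions first,
        # so that the flattened size can be used as the input dimension of
        # the top MLP below.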
self.conv_sequence._push_allocation_config()
conv_out_dim = self.conv_sequence.get_dim('output')
self.top_mlp.activations = self.top_mlp_activations
self.top_mlp.dims = [numpy.prod(conv_out_dim)] + self.top_mlp_dims
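

# For illustration, the network that ``main`` below builds with its default
# arguments is equivalent to:
#
#     LeNet([Rectifier(), Rectifier()], num_channels=1, image_shape=(28, 28),
#           filter_sizes=[(5, 5), (5, 5)], feature_maps=[20, 50],
#           pooling_sizes=[(2, 2), (2, 2)],
#           top_mlp_activations=[Rectifier(), Softmax()],
#           top_mlp_dims=[500, 10])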


def main(save_to, save_freq, num_epochs, feature_maps=None, mlp_hiddens=None,
         conv_sizes=None, pool_sizes=None, batch_size=500,
         num_batches=None):
if feature_maps is None:
feature_maps = [20, 50]
if mlp_hiddens is None:
mlp_hiddens = [500]
if conv_sizes is None:
conv_sizes = [5, 5]
if pool_sizes is None:
pool_sizes = [2, 2]
image_size = (28, 28)
output_size = 10
# Use ReLUs everywhere and softmax for the final prediction
conv_activations = [Rectifier() for _ in feature_maps]
mlp_activations = [Rectifier() for _ in mlp_hiddens] + [Softmax()]
convnet = LeNet(conv_activations, 1, image_size,
                    filter_sizes=list(zip(conv_sizes, conv_sizes)),
feature_maps=feature_maps,
                    pooling_sizes=list(zip(pool_sizes, pool_sizes)),
top_mlp_activations=mlp_activations,
top_mlp_dims=mlp_hiddens + [output_size],
border_mode='full',
weights_init=Uniform(width=.2),
biases_init=Constant(0))
# We push initialization config to set different initialization schemes
# for convolutional layers.
    convnet.push_initialization_config()
    convnet.layers[0].weights_init = Uniform(width=.2)
    # layers[3] is the second convolution (each stage of ``convnet.layers``
    # is conv, activation, pool), so it gets its own initialization width.
    convnet.layers[3].weights_init = Uniform(width=.09)
    convnet.top_mlp.linear_transformations[0].weights_init = Uniform(width=.08)
    convnet.top_mlp.linear_transformations[1].weights_init = Uniform(width=.11)
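    # Allocate all parameters and apply the initialization schemes
    # configured above.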
convnet.initialize()
logging.info("Input dim: {} {} {}".format(
*convnet.children[0].get_dim('input_')))
for i, layer in enumerate(convnet.layers):
        if isinstance(layer, Activation):
logging.info("Layer {} ({})".format(i, layer.__class__.__name__))
else:
logging.info("Layer {} ({}) dim: {} {} {}".format(
i, layer.__class__.__name__, *layer.get_dim('output')))
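    # Symbolic Theano input variables; their names must match the source
    # names of the MNIST data streams below ('features' and 'targets').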
x = tensor.tensor4('features')
y = tensor.lmatrix('targets')
# Normalize input and apply the convnet
probs = convnet.apply(x)
cost = CategoricalCrossEntropy().apply(y.flatten(),
probs).copy(name='cost')
error_rate = MisclassificationRate().apply(y.flatten(), probs).copy(
name='error_rate')
cg = ComputationGraph([cost, error_rate])
mnist_train = MNIST(("train",))
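    # ``default_stream`` applies the dataset's default transformers; for
    # MNIST this scales the uint8 images to floats in [0, 1], which is the
    # input normalization mentioned above.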
mnist_train_stream = DataStream.default_stream(
mnist_train, iteration_scheme=ShuffledScheme(
mnist_train.num_examples, batch_size))
mnist_test = MNIST(("test",))
mnist_test_stream = DataStream.default_stream(
mnist_test,
iteration_scheme=ShuffledScheme(
mnist_test.num_examples, batch_size))
# Train with simple SGD
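    # ``Scale`` multiplies the gradient by a constant learning rate, i.e.
    # plain stochastic gradient descent without momentum.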
algorithm = GradientDescent(
cost=cost, parameters=cg.parameters,
step_rule=Scale(learning_rate=0.1))
# `Timing` extension reports time for reading data, aggregating a batch
# and monitoring;
# `ProgressBar` displays a nice progress bar during training.
extensions = [Timing(),
FinishAfter(after_n_epochs=num_epochs,
after_n_batches=num_batches),
DataStreamMonitoring(
[cost, error_rate],
mnist_test_stream,
prefix="test"),
TrainingDataMonitoring(
[cost, error_rate,
aggregation.mean(algorithm.total_gradient_norm)],
prefix="train",
after_epoch=True),
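                  # Save the training state to ``save_to`` every
                  # ``save_freq`` batches (CheckpointBlock is a local
                  # replacement for the Blocks Checkpoint extension).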
CheckpointBlock(save_to, every_n_batches=save_freq),
ProgressBar(),
Printing()]
model = Model(cost)
main_loop = MainLoop(
algorithm,
mnist_train_stream,
model=model,
extensions=extensions)
main_loop.run()


if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
    parser = ArgumentParser(
        description="An example of training a convolutional network "
                    "on the MNIST dataset.")
parser.add_argument("--num-epochs", type=int, default=2,
help="Number of training epochs to do.")
parser.add_argument("save_to", default="mnist.pkl", nargs="?",
help="Destination to save the state of the training "
"process.")
parser.add_argument("save_freq", default=5, nargs="?",
help="Checkpoint frequency")
parser.add_argument("--feature-maps", type=int, nargs='+',
default=[20, 50], help="List of feature maps numbers.")
parser.add_argument("--mlp-hiddens", type=int, nargs='+', default=[500],
help="List of numbers of hidden units for the MLP.")
parser.add_argument("--conv-sizes", type=int, nargs='+', default=[5, 5],
help="Convolutional kernels sizes. The kernels are "
"always square.")
parser.add_argument("--pool-sizes", type=int, nargs='+', default=[2, 2],
help="Pooling sizes. The pooling windows are always "
"square. Should be the same length as "
"--conv-sizes.")
parser.add_argument("--batch-size", type=int, default=500,
help="Batch size.")
args = parser.parse_args()
main(**vars(args))