#!/usr/bin/env python
"""train.py

Usage:
    train.py [--n_mem=<dim>] [--dropout=<p>]

Options:
    --n_mem=<dim>   Dimension of the LSTM hidden state [default: 300]
    --dropout=<p>   Dropout probability [default: 0.3]
"""
import json
import logging
import pprint
import cPickle as pkl
from collections import OrderedDict

import numpy as np
import theano
from fuel.datasets import IndexableDataset
from fuel.schemes import ShuffledScheme
from fuel.streams import DataStream
from fuel.transformers import Cast, Mapping, Padding
from blocks.algorithms import Adam, GradientDescent
from blocks.extensions import FinishAfter, Printing, Timing
from blocks.extensions.monitoring import (DataStreamMonitoring,
                                          TrainingDataMonitoring)
from blocks.extensions.saveload import Checkpoint
from blocks.graph import ComputationGraph, apply_dropout
from blocks.main_loop import MainLoop
from blocks.model import Model

from model import LSTMModel
# Fix the RNG seed so shuffling and initialization are reproducible.
np.random.seed(1)


def _transpose(data):
    """Swap each array from (batch, time) to (time, batch); Blocks
    recurrent bricks expect time-major input."""
    return tuple(array.T for array in data)
def wrap_stream(split):
    """Wrap a (parses, relations) split in a shuffled, padded Fuel stream."""
    parses, relations = split
    parse1, parse2 = zip(*parses)
    dataset = IndexableDataset(
        indexables=OrderedDict([
            ('parse1', parse1),
            ('parse2', parse2),
            ('relation', np.array(relations)),
        ]),
        axis_labels={
            'parse1': ('batch', 'index'),
            'parse2': ('batch', 'index'),
            'relation': ('batch', 'index'),
        })
    stream = DataStream(
        dataset=dataset,
        iteration_scheme=ShuffledScheme(examples=dataset.num_examples,
                                        batch_size=128))
    # Pad variable-length parses within each batch, adding *_mask sources.
    stream = Padding(stream, mask_sources=('parse1', 'parse2'))
    # Transpose every source to time-major layout.
    stream = Mapping(stream, _transpose)
    stream = Cast(stream, 'int32')
    return stream
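
# A minimal smoke test of the stream pipeline (a sketch for manual use,
# assuming the same dataset/trainXY.json layout that is loaded below):
#
#   with open('dataset/trainXY.json') as f:
#       batch = next(wrap_stream(json.load(f)).get_epoch_iterator(as_dict=True))
#   # After Padding + _transpose, batch['parse1'] has shape
#   # (max_len, batch_size), with a matching batch['parse1_mask'].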
if __name__ == '__main__':
    from docopt import docopt

    logging.basicConfig(level=logging.INFO)
    args = docopt(__doc__)
    n_mem = int(args['--n_mem'])
    dropout = float(args['--dropout'])
    n_epoch = 10
    logging.info(pprint.pformat(args))

    # Load the word/relation vocabularies and wrap both splits as streams.
    with open('dataset/vocab.pkl', 'rb') as f:
        vocabs = pkl.load(f)
    word_vocab, rel_vocab = vocabs['word'], vocabs['rel']
    with open('dataset/trainXY.json') as f:
        train = wrap_stream(json.load(f))
    with open('dataset/testXY.json') as f:
        test = wrap_stream(json.load(f))
    # Build the model, initialize its bricks, then overwrite the lookup
    # table's random initialization with pretrained word embeddings.
    model = LSTMModel(len(vocabs['word']), n_mem, len(vocabs['rel']))
    cg = ComputationGraph(model.cost)
    bricks_model = Model(model.cost)
    for brick in bricks_model.get_top_bricks():
        brick.initialize()
    model.lookup.W.set_value(
        vocabs['word'].get_embeddings().astype(theano.config.floatX))
    if dropout:
        # Dropout is currently disabled: the filter below matches LSTM
        # parameter names, which do not appear among the graph's
        # intermediary variables, so it selects nothing.
        pass
        # logging.info('Applying dropout of {}'.format(dropout))
        # lstm_dropout = [v for v in cg.intermediary_variables
        #                 if v.name in {'W_cell_to_in', 'W_cell_to_out'}]
        # cg = apply_dropout(cg, lstm_dropout, drop_prob=dropout)
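    # A minimal sketch of how dropout could be re-enabled with Blocks'
    # VariableFilter (assumes the LSTM brick is reachable as model.lstm,
    # a hypothetical attribute; untested against this model):
    #
    #   from blocks.filter import VariableFilter
    #   from blocks.roles import INPUT
    #   dropout_inputs = VariableFilter(
    #       roles=[INPUT], bricks=[model.lstm])(cg.variables)
    #   cg = apply_dropout(cg, dropout_inputs, drop_prob=dropout)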
    # Log a summary of all parameters: name, shape, and mean initial value.
    parameters = bricks_model.get_parameter_dict()
    logging.info("Parameters:\n" +
                 pprint.pformat(
                     [(key, value.get_value().shape, value.get_value().mean())
                      for key, value in parameters.items()],
                     width=120))
    algorithm = GradientDescent(cost=model.cost, parameters=cg.parameters,
                                step_rule=Adam())

    # Variables useful for debugging: cost, accuracy, global step/gradient
    # norms, plus per-parameter weight and gradient norms.
    observables = [model.cost, model.acc,
                   algorithm.total_step_norm, algorithm.total_gradient_norm]
    for name, parameter in parameters.items():
        observables.append(parameter.norm(2).copy(name=name + "_norm"))
        observables.append(
            algorithm.gradients[parameter].norm(2).copy(name=name + "_grad_norm"))

    train_monitor = TrainingDataMonitoring(variables=observables,
                                           prefix="train", after_batch=True)
    test_monitor = DataStreamMonitoring(variables=[model.cost, model.acc],
                                        data_stream=test, prefix="test")
    average_monitoring = TrainingDataMonitoring(
        observables, prefix="average", every_n_batches=10)
    main_loop = MainLoop(
        model=bricks_model,
        data_stream=train,
        algorithm=algorithm,
        extensions=[
            Timing(),
            train_monitor,
            test_monitor,
            average_monitoring,
            FinishAfter(after_n_epochs=n_epoch),
            Checkpoint('model.save', every_n_batches=500,
                       save_separately=["model", "log"]),
            Printing(every_n_epochs=1)])
    main_loop.run()
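
# Example invocation, matching the docopt usage string above:
#
#   python train.py --n_mem=300 --dropout=0.3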