# fgvr_train.py
# Forked from ahmdtaha/FineGrainedVisualRecognition
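"""Training script for the fine-grained visual recognition (FGVR) models.

Builds the input pipelines, network, and optimizer specified in
configuration.py, then runs the training loop with periodic validation,
TensorBoard summaries, and checkpointing (TensorFlow 1.x API).
Run directly: `python fgvr_train.py`.
"""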
import logging.config
import os
import time
from datetime import datetime
from pydoc import locate

import numpy as np
import tensorflow as tf

import configuration as config
import constants as const
from data.tf_tuple_loader import TensorflowTupleLoader
from utils import log_utils
from utils import tf_utils


def mprint(msg, log):
    """Forward a message to the supplied logger (swap in your own logger here)."""
    log.info(msg)

def main():
    # Resolve the tuple-loader class named in the config and build the
    # train/validation image and label lists from their CSV files.
    img_generator_class = locate(config.db_tuple_loader)
    args = dict()
    args['csv_file'] = config.train_csv_file
    train_iter = img_generator_class(args)
    args['csv_file'] = config.test_csv_file
    val_iter = img_generator_class(args)

    train_imgs, train_lbls = train_iter.imgs_and_lbls()
    val_imgs, val_lbls = val_iter.imgs_and_lbls()

    save_model_dir = config.model_save_path
    log_file = os.path.join(save_model_dir, 'train')
    logging.config.dictConfig(log_utils.get_logging_dict(log_file))
    log = logging.getLogger('train')
    log.info('Data loading complete')
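
    # Everything below lives in a single TF graph: the feedable-iterator input
    # pipeline, the network, the (optionally accumulated) gradient step, and
    # the summary/checkpoint plumbing.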
    with tf.Graph().as_default():
        train_dataset = TensorflowTupleLoader(train_imgs, train_lbls, is_training=True).dataset
        val_dataset = TensorflowTupleLoader(val_imgs, val_lbls, is_training=False,
                                            batch_size=config.batch_size, repeat=False).dataset

        # Feedable iterator: the string `handle` fed at session time selects
        # whether the graph reads from the training or the validation dataset.
        handle = tf.placeholder(tf.string, shape=[])
        iterator = tf.data.Iterator.from_string_handle(
            handle, train_dataset.output_types, train_dataset.output_shapes)
        images_ph, lbls_ph = iterator.get_next()

        # The training iterator repeats indefinitely; the validation iterator
        # is re-initialized before every evaluation pass.
        training_iterator = train_dataset.make_one_shot_iterator()
        validation_iterator = val_dataset.make_initializable_iterator()

        # Resolve and instantiate the network class named in the config.
        network_class = locate(config.network_name)
        model = network_class(num_classes=config.num_classes, is_training=True,
                              images_ph=images_ph, lbls_ph=lbls_ph)
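
        # Caffe-style gradient accumulation: when config.caffe_iter_size > 1,
        # gradients from several consecutive mini-batches are summed into
        # non-trainable accumulator variables and applied as a single update,
        # emulating a larger effective batch size.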
        trainable_vars = tf.trainable_variables()
        if config.caffe_iter_size > 1:
            # One zero-initialized accumulator per trainable variable, plus
            # ops that reset all accumulators between optimization steps.
            accum_vars = [tf.Variable(tf.zeros_like(tv.initialized_value()), trainable=False)
                          for tv in trainable_vars]
            zero_ops = [tv.assign(tf.zeros_like(tv)) for tv in accum_vars]
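
        # Make the optimization step depend on UPDATE_OPS (e.g. batch-norm
        # moving-average updates) so they run with every training step.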
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            global_step = tf.Variable(0, name='global_step', trainable=False)
            learning_rate = tf_utils.poly_lr(global_step)
            optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=0.9)

            if config.caffe_iter_size > 1:
                # Accumulated gradients: add each mini-batch gradient to its
                # accumulator (accum_vars and grads share the same ordering),
                # then apply the accumulated gradients scaled by 1/iter_size.
                grads = optimizer.compute_gradients(model.train_loss, trainable_vars)
                accum_ops = [accum_vars[i].assign_add(gv[0]) for i, gv in enumerate(grads)]
                iter_size = config.caffe_iter_size
                train_op = optimizer.apply_gradients(
                    [(accum_vars[i] / iter_size, gv[1]) for i, gv in enumerate(grads)],
                    global_step=global_step)
            else:
                # Plain momentum SGD: compute and apply gradients every batch.
                grads = optimizer.compute_gradients(model.train_loss)
                train_op = optimizer.apply_gradients(grads, global_step=global_step)
        # Optional debug: list the trainable variables.
        # for v in tf.trainable_variables():
        #     mprint('trainable_variables: {0} \t {1}'.format(str(v.name), str(v.shape)), log)
        sess = tf.InteractiveSession()
        tf.global_variables_initializer().run()
        tf.local_variables_initializer().run()

        # Materialize the string handles used to switch the feedable iterator
        # between the training and validation pipelines.
        training_handle = sess.run(training_iterator.string_handle())
        validation_handle = sess.run(validation_iterator.string_handle())

        # TensorBoard output: a timestamped directory unless a fixed file name
        # is given. NOTE: 'tensorbaord' is the attribute name as spelled in
        # configuration.py.
        now = datetime.now()
        if config.tensorbaord_file is None:
            tb_path = config.tensorbaord_dir + now.strftime("%Y%m%d-%H%M%S")
        else:
            tb_path = config.tensorbaord_dir + config.tensorbaord_file

        start_iter = 1  # No resume support in this version of the code.
        train_writer = tf.summary.FileWriter(tb_path, sess.graph)
        saver = tf.train.Saver()  # Saves variables learned during training.
        ckpt_file = os.path.join(save_model_dir, config.model_save_name)
        mprint('Model Path {0}'.format(ckpt_file), log)

        load_model_msg = model.load_model(save_model_dir, ckpt_file, sess, saver, is_finetuning=True)
        mprint(load_model_msg, log)
        # Validation summaries.
        val_loss = tf.summary.scalar('Val_Loss', model.val_loss)
        val_acc_op = tf.summary.scalar('Batch_Val_Acc', model.val_accuracy)
        model_acc_op = tf.summary.scalar('Split_Val_Accuracy', model.val_accumulated_accuracy)

        mprint('Start Training ***********', log)
        best_acc = 0
        best_model_step = 0
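
        # Each training iteration runs (caffe_iter_size - 1) accumulation-only
        # mini-batches, then one final run that reports loss/accuracy and
        # applies the accumulated gradients; note that the gradient of this
        # final mini-batch is not itself added to the accumulators in this
        # version. With caffe_iter_size == 1 the loop reduces to a single
        # plain momentum-SGD step per iteration.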
        for current_iter in range(start_iter, config.max_iter + 1):
            start_time_train = time.time()
            feed_dict = {handle: training_handle}

            # Accumulation-only mini-batches (no-op when caffe_iter_size == 1).
            for mini_batch in range(config.caffe_iter_size - 1):
                sess.run(accum_ops, feed_dict)

            model_loss_value, accuracy_value, _ = sess.run(
                [model.train_loss, model.train_accuracy, train_op], feed_dict)

            # Reset the gradient accumulators for the next iteration.
            if config.caffe_iter_size > 1:
                sess.run(zero_ops)

            train_time = time.time() - start_time_train
            if current_iter % config.logging_threshold == 0 or current_iter == 1:
                mprint('i {0:04d} loss {1:4f} Acc {2:2f} Batch Time {3:3f}'.format(
                    current_iter, model_loss_value, accuracy_value, train_time), log)
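
            # Periodic validation: re-initialize the validation iterator, sweep
            # the whole split until OutOfRangeError, then write the summaries.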
            if current_iter % config.test_iteration == 0:
                run_metadata = tf.RunMetadata()
                # Reset streaming/accumulated metrics (local variables).
                tf.local_variables_initializer().run()
                sess.run(validation_iterator.initializer)
                while True:
                    try:
                        feed_dict = {handle: validation_handle}
                        val_loss_op, batch_accuracy, accuracy_op, _val_acc_op, _val_acc, c_cnf_mat = sess.run(
                            [val_loss, model.val_accuracy, model_acc_op, val_acc_op,
                             model.val_accumulated_accuracy, model.val_confusion_mat], feed_dict)
                    except tf.errors.OutOfRangeError:
                        # Validation split exhausted; _val_acc holds the
                        # accumulated accuracy over the whole split.
                        mprint('Val Acc {0}'.format(_val_acc), log)
                        break

                train_writer.add_run_metadata(run_metadata, 'step%03d' % current_iter)
                train_writer.add_summary(val_loss_op, current_iter)
                train_writer.add_summary(_val_acc_op, current_iter)
                train_writer.add_summary(accuracy_op, current_iter)
                train_writer.flush()
                if current_iter % config.logging_threshold == 0:
                    saver.save(sess, ckpt_file)
                    # Early-stopping-style bookkeeping: keep a separate copy of
                    # the best-performing checkpoint seen so far.
                    if best_acc < _val_acc:
                        saver.save(sess, ckpt_file + 'best')
                        best_acc = _val_acc
                        best_model_step = current_iter
                    mprint('Best Acc {0} at {1} == {2}'.format(best_acc, best_model_step, config.model_filename), log)
        # Save the final checkpoint, both at the top-level checkpoint path and
        # in a step-numbered subdirectory, then close the session.
        saver.save(sess, ckpt_file)
        ckpt = os.path.join(save_model_dir, str(current_iter), config.model_save_name)
        saver.save(sess, ckpt)
        sess.close()


if __name__ == '__main__':
    main()