def test():
    """Interactively exercise the model on lines read from stdin.

    Loads the model once, then loops: read a line, run ``model.test`` on it,
    and post-process the first candidate output with ``Checker.correct``.
    The loop now exits cleanly on EOF (Ctrl-D) or Ctrl-C instead of dying
    with a traceback.
    """
    # NOTE(review): ``max_size`` is a module-level name defined elsewhere in
    # this file.
    model = Model("data.txt", max_size)
    model.load()
    checker = Checker()
    while True:
        try:
            # Passing the prompt to input() replaces the original
            # print(">", end='') / input() pair and guarantees the prompt is
            # flushed before reading.
            text = input(">")
        except (EOFError, KeyboardInterrupt):
            # Clean exit instead of an unhandled-exception traceback.
            break
        print("Input :", text)
        output = model.test(text)
        print("Output:", output)
        # model.test returns a sequence of candidates; correct the best one.
        fixed = checker.correct(output[0])
        print("Fixed :", fixed)
def eval(self):
    """Build the graph: input placeholders, model logits, cross-entropy cost,
    and an SGD train op, plus merged summaries and a saver.

    NOTE(review): despite the name, this wires up ``self.train_step`` —
    confirm whether this is meant to build the training graph rather than a
    pure evaluation graph.
    """
    with self.graph.as_default():
        # Batch of 3-channel (RGB) images; all dimensions come from FLAGS.
        self.x = tf.placeholder(dtype=tf.float32, shape=(FLAGS.esize, FLAGS.height, FLAGS.width, 3), name="inputs")
        # A single integer class label (shape (1,)).
        self.y = tf.placeholder(dtype=tf.int32, shape=(1, ), name='label')
        # Dropout keep probability, fed at run time.
        self.keep_prob = tf.placeholder(dtype=tf.float32, name='keep_prob')
        logits = Model(self.x, self.is_training, self.keep_prob).logits
        # Side effect: builds the accuracy ops on self.
        self._calc_accuracy(logits, self.y)

        with tf.name_scope('Cost'):
            cross_entropy = slim.losses.sparse_softmax_cross_entropy(
                logits=logits, labels=self.y, scope='cross_entropy')
            tf.summary.scalar("cross_entropy", cross_entropy)

        with tf.name_scope('Optimizer'):
            self.global_step = tf.Variable(0, name='global_step', trainable=False)
            # Plain SGD is active; the alternatives below are kept for
            # experimentation.
            optimizer = tf.train.GradientDescentOptimizer(FLAGS.lrate)
            # optimizer = tf.train.AdamOptimizer(FLAGS.lrate)
            # optimizer = tf.train.MomentumOptimizer(FLAGS.lrate, 0.9, use_nesterov=True)
            # optimizer = tf.train.RMSPropOptimizer(FLAGS.lrate)
            self.train_step = slim.learning.create_train_op(
                cross_entropy, optimizer, self.global_step,
                aggregation_method=tf.AggregationMethod.EXPERIMENTAL_TREE)

        self.summary_op = tf.summary.merge_all()
        self.saver = tf.train.Saver()
# Convolutional net: three (pool -> conv) stages followed by a small dense
# head, trained with Direct Feedback Alignment below.
# NOTE(review): pooling *before* each convolution is unusual — confirm the
# layer order is intentional.
layers = [
    MaxPool(size=2, stride=2),
    # Filter tuples appear to be (out_channels, in_channels, kh, kw) — the
    # in_channels of each layer matches the previous layer's out_channels.
    Convolution((8, 3, 3, 3), stride=1, padding=1, dropout_rate=0,
                activation=activation.tanh, weight_initializer=initializer[0]),
    MaxPool(size=2, stride=2),
    Convolution((16, 8, 3, 3), stride=1, padding=1, dropout_rate=0,
                activation=activation.tanh, weight_initializer=initializer[1]),
    MaxPool(size=2, stride=2),
    Convolution((32, 16, 3, 3), stride=1, padding=1, dropout_rate=0,
                activation=activation.tanh, weight_initializer=initializer[2]),
    MaxPool(size=2, stride=2),
    ConvToFullyConnected(),  # flatten feature maps for the dense layers
    FullyConnected(size=64, activation=activation.tanh),
    FullyConnected(size=10, activation=None, last_layer=True)  # 10-class logits
]

model = Model(
    layers=layers,
    num_classes=10,
    optimizer=GDMomentumOptimizer(lr=1e-3, mu=0.9),
)

print("\n\n------------------------------------")
print("Initialize: {}".format(initializer))

print("\nRun training:\n------------------------------------")
# `data` and `num_passes` are defined elsewhere in this file.
stats = model.train(data_set=data, method='dfa', num_passes=num_passes, batch_size=50)
loss, accuracy = model.cost(*data.test_set())

print("\nResult:\n------------------------------------")
print('loss on test set: {}'.format(loss))
print('accuracy on test set: {}'.format(accuracy))
# Shallow single-hidden-layer baseline on CIFAR-10.
num_iteration = 20
data = dataset.cifar10_dataset.load()

layers = [
    ConvToFullyConnected(),  # flatten the image input into a vector
    FullyConnected(size=1000, activation=activation.tanh),
    FullyConnected(size=10, activation=None, last_layer=True)  # 10-class logits
]

# -------------------------------------------------------
# Train with BP
# -------------------------------------------------------
# NOTE(review): the banner says "BP" but method='dfa' is passed below —
# confirm which training method is actually intended here.
model = Model(
    layers=layers,
    num_classes=10,
    optimizer=GDMomentumOptimizer(lr=1e-3, mu=0.9),
)

print("\nRun training:\n------------------------------------")
stats_shallow = model.train(data_set=data, method='dfa', num_passes=num_iteration, batch_size=64)
loss, accuracy = model.cost(*data.test_set())

print("\nResult:\n------------------------------------")
print('loss on test set: {}'.format(loss))
print('accuracy on test set: {}'.format(accuracy))
def eval(self):
    """Build the tracker's graph: placeholders, the tracking model, the
    coordinate and association losses, and an Adam train op.

    NOTE(review): despite the name, this builds a *training* graph
    (is_training=True, keep_prob=0.5 and a train_step) — confirm the method
    name is intended.
    """
    with self.graph.as_default():
        # 4-channel square image batch, fed NHWC and transposed to NCHW for
        # the network.  NOTE(review): confirm what the 4th channel carries.
        self.x = tf.placeholder(dtype=tf.float32, shape=(None, self.img_size, self.img_size, 4), name="img_inputs")
        self.x_nchw = tf.transpose(self.x, perm=[0, 3, 1, 2])
        # Initial hidden state for the GRU.
        self.h_state_init_1 = tf.placeholder(dtype=tf.float32, shape=(1, self.GRU_SIZE), name="h_state_init1")
        # self.h_state_init_2 = tf.placeholder(dtype=tf.float32, shape=(None, self.GRU_SIZE), name="h_state_init2")
        # _h_state_init = tuple([self.h_state_init_1,self.h_state_init_2])
        # Detection annotations: 5 values per cell of a cell_size x cell_size grid.
        self.det_anno = tf.placeholder(dtype=tf.float32, shape=(None, self.cell_size * self.cell_size * 5), name="det_anno")
        # Previous association record for record_N tracked targets.
        self.prev_asscoia = tf.placeholder(
            dtype=tf.float32,
            shape=(None, self.record_N * (self.cell_size * self.cell_size + 1)),
            name="prev_asscoia")
        # self.cell_state_init = tf.placeholder(dtype=tf.float32, shape=(1, 4096), name="cell_state_init")
        # Ground-truth coordinates for the coordinate loss.
        self.track_y = tf.placeholder(dtype=tf.float32, shape=(None, self.cell_size * self.cell_size * 5), name='track_label')
        # NOTE(review): the name= below duplicates "prev_asscoia" from the
        # placeholder above, so TF will uniquify one of them — this was
        # probably meant to be "current_asscoia".
        self.current_asscoia_y = tf.placeholder(
            dtype=tf.float32,
            shape=(None, (self.record_N + 1) * (self.cell_size * self.cell_size + 1)),
            name="prev_asscoia")
        self.epsilon_vector_y = tf.placeholder(dtype=tf.float32, shape=(None, self.record_N), name="epsilon_vector")

        mynet = Model(self.x_nchw, self.det_anno, self.prev_asscoia, self.h_state_init_1,
                      is_training=True, data_format='NCHW', keep_prob=0.5)
        coord_flow = mynet.coord_flow
        epsilon_flow = mynet.epsilon_flow
        associa_flow = mynet.associa_flow
        # NOTE(review): the two RNN states below are fetched but never used in
        # this method.
        rnn_coord_state = mynet.rnn_coord_state
        rnn_associa_state = mynet.rnn_associa_state

        with tf.name_scope('Cost'):
            coord_loss = self.coord_loss_function(coord_flow, self.track_y, name='coord_loss')
            self.epsilon_loss, self.target_associa_loss, self.input_associa_loss = self.association_loss(
                epsilon_flow, associa_flow, self.current_asscoia_y, self.epsilon_vector_y, name='track_loss')
            # self.epsilon_loss = tf.Print(self.epsilon_loss,[self.epsilon_loss], message="epsilon_loss:")
            # self.associa_loss = tf.Print(self.associa_loss,[self.associa_loss], message="associa_loss:")
            reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
            # Scalar total: the four loss terms plus every regularization loss.
            self.total_loss = tf.add_n([
                coord_loss + self.epsilon_loss + self.target_associa_loss + self.input_associa_loss
            ] + reg_losses)
            # self.total_loss = tf.add_n(reg_losses)
            tf.summary.scalar("total_loss", self.total_loss)

        with tf.name_scope('Optimizer'):
            self.global_step = tf.Variable(0, name='global_step', trainable=False)
            #optimizer = tf.train.GradientDescentOptimizer(FLAGS.lrate)
            # Adam with a large epsilon (0.01) — presumably for stability;
            # confirm against training logs.
            self.optimizer = tf.train.AdamOptimizer(FLAGS.lrate, epsilon=0.01)
            # optimizer = tf.train.MomentumOptimizer(FLAGS.lrate, 0.9, use_nesterov=True)
            # optimizer = tf.train.RMSPropOptimizer(FLAGS.lrate)
            self.train_step = slim.learning.create_train_op(
                self.total_loss, self.optimizer, self.global_step,
                aggregation_method=tf.AggregationMethod.EXPERIMENTAL_TREE)

        self.summary_op = tf.summary.merge_all()
        # Keep every checkpoint (max_to_keep=None) for later evaluation.
        self.saver = tf.train.Saver(max_to_keep=None)
              # NOTE(review): this chunk begins mid-way through the final
              # Convolution(...) entry of the `layers` list; its leading
              # arguments are above this view.
              padding=1, dropout_rate=0, activation=activation.tanh),
    #MaxPool(size=2, stride=2),
    ConvToFullyConnected(),  # flatten feature maps for the dense head
    FullyConnected(size=64, activation=activation.tanh),
    FullyConnected(size=10, activation=None, last_layer=True)  # 10-class logits
]

# -------------------------------------------------------
# Train with BP
# -------------------------------------------------------
model = Model(layers=layers,
              num_classes=10,
              optimizer=GDMomentumOptimizer(lr=1e-2, mu=0.9),
              # Halve the learning rate every 7 passes.
              lr_decay=0.5,
              lr_decay_interval=7)

print("\nRun training:\n------------------------------------")
stats_bp = model.train(data_set=data, method='bp', num_passes=num_iteration, batch_size=64)
loss, accuracy = model.cost(*data.test_set())

print("\nResult:\n------------------------------------")
print('loss on test set: {}'.format(loss))
print('accuracy on test set: {}'.format(accuracy))
        # NOTE(review): this chunk begins inside the last Convolution(...) of
        # the final candidate architecture; its leading arguments are above
        # this view.  Forward weights start at zero; DFA feedback weights are
        # random-normal scaled by 1/sqrt(4*4*32).
        weight_initializer=weight_initializer.Fill(0),
        fb_weight_initializer=weight_initializer.RandomNormal(1/np.sqrt(4*4*32))),
    MaxPool(size=2, stride=2),
    ConvToFullyConnected(),  # flatten feature maps for the dense head
    FullyConnected(size=64, activation=activation.tanh),
    FullyConnected(size=10, activation=None, last_layer=True)  # 10-class logits
    ]
]

statistics = []

# Train each candidate architecture with DFA and collect its stats.
for model_layer in model_layers:
    model = Model(
        layers=model_layer,
        num_classes=10,
        optimizer=GDMomentumOptimizer(lr=1e-3, mu=0.9),
        # regularization=0.001,
        # lr_decay=0.5,
        # lr_decay_interval=100
    )

    print("\nRun training:\n------------------------------------")
    stats = model.train(data_set=data, method='dfa', num_passes=5, batch_size=50)
    loss, accuracy = model.cost(*data.test_set())

    print("\nResult:\n------------------------------------")
    print('loss on test set: {}'.format(loss))
    print('accuracy on test set: {}'.format(accuracy))

    statistics.append(stats)
              # NOTE(review): this chunk begins mid-way through the final
              # Convolution(...) entry of the `layers` list; its leading
              # arguments are above this view.
              padding=1, dropout_rate=0, activation=activation.tanh),
    #MaxPool(size=2, stride=2),
    ConvToFullyConnected(),  # flatten feature maps for the dense head
    FullyConnected(size=64, activation=activation.tanh),
    FullyConnected(size=10, activation=None, last_layer=True)  # 10-class logits
]

# -------------------------------------------------------
# Train with DFA
# -------------------------------------------------------
model = Model(layers=layers,
              num_classes=10,
              optimizer=GDMomentumOptimizer(lr=1e-2, mu=0.9),
              # Halve the learning rate every 50 passes.
              lr_decay=0.5,
              lr_decay_interval=50)

print("\nRun training:\n------------------------------------")
stats_dfa = model.train(data_set=data, method='dfa', num_passes=num_iteration, batch_size=64)
loss, accuracy = model.cost(*data.test_set())

print("\nResult:\n------------------------------------")
print('loss on test set: {}'.format(loss))
print('accuracy on test set: {}'.format(accuracy))
# NOTE(review): this chunk begins inside an if/else whose condition is above
# this view — presumably selecting an NHWC->NCHW transpose based on the
# data format; confirm against the preceding lines.
    img_input2 = tf.transpose(img_input, perm=[0, 3, 1, 2])
else:
    img_input2 = img_input

# Initial RNN state placeholders fed at inference time.
h_state_init_1 = tf.placeholder(dtype=tf.float32, shape=(1, 4096))
h_state_init_2 = tf.placeholder(dtype=tf.float32, shape=(1, 4096))
_h_state_init = tuple([h_state_init_1, h_state_init_2])
cell_state_init = tf.placeholder(dtype=tf.float32, shape=(1, 768))

# Per-grid-cell tracking bookkeeping carried across frames.
track_record = np.zeros((cell_size, cell_size), dtype=np.float32)
max_track_id = 0

# NOTE(review): `graph` is created but never entered via as_default() in this
# view, so the model below is built in the default graph — confirm intended.
graph = tf.Graph()
mynet = Model(img_input2, _h_state_init, cell_state_init, is_training=False, keep_prob=1, data_format=data_format)

isess.run(tf.global_variables_initializer())
# Restore model.
saver = tf.train.Saver()
saver.restore(isess, chkpt_file)


def coord_loss(tensor_x, label_y):
    """Score predicted per-cell boxes against ground-truth labels.

    NOTE(review): the body is truncated at the end of this chunk.  The
    prediction carries 5 values per grid cell, the label carries 7.
    """
    tensors = np.reshape(tensor_x, (cell_size, cell_size, 5))
    labels = np.reshape(label_y, (cell_size, cell_size, 7))
    # Channel 0 of the prediction is the confidence score.
    predict_confidence = tensors[:, :, 0]
              # NOTE(review): this chunk begins mid-way through the final
              # Convolution(...) entry of the `layers` list; its leading
              # arguments are above this view.
              padding=1, dropout_rate=0, activation=activation.tanh),
    #MaxPool(size=2, stride=2),
    ConvToFullyConnected(),  # flatten feature maps for the dense head
    FullyConnected(size=64, activation=activation.tanh),
    FullyConnected(size=10, activation=None, last_layer=True)  # 10-class logits
]

# -------------------------------------------------------
# Train with BP
# -------------------------------------------------------
model = Model(
    layers=layers,
    num_classes=10,
    optimizer=GDMomentumOptimizer(lr=3 * 1e-2, mu=0.9),
)

print("\nRun training:\n------------------------------------")
stats = model.train(data_set=data, method='bp', num_passes=num_iteration, batch_size=64)
loss, accuracy = model.cost(*data.test_set())

print("\nResult:\n------------------------------------")
print('loss on test set: {}'.format(loss))
print('accuracy on test set: {}'.format(accuracy))
# DFA-vs-BP comparison on a 5-hidden-layer fully-connected net (CIFAR-10).
num_iteration = 10
data = dataset.cifar10_dataset.load()
"""
DFA Model definition
"""
layers_dfa = [
    ConvToFullyConnected(),  # flatten the image input into a vector
    FullyConnected(size=500, activation=activation.tanh),
    FullyConnected(size=500, activation=activation.tanh),
    FullyConnected(size=500, activation=activation.tanh),
    FullyConnected(size=500, activation=activation.tanh),
    FullyConnected(size=500, activation=activation.tanh),
    FullyConnected(size=10, activation=None, last_layer=True)  # 10-class logits
]
model_dfa = Model(layers=layers_dfa,
                  num_classes=10,
                  optimizer=GDMomentumOptimizer(lr=3 * 1e-3, mu=0.9),
                  regularization=0.09,
                  # Halve the learning rate every 3 passes.
                  lr_decay=0.5,
                  lr_decay_interval=3)
"""
BP Model definition
"""
# Identical architecture, to be trained with backprop for comparison.
layers_bp = [
    ConvToFullyConnected(),
    FullyConnected(size=500, activation=activation.tanh),
    FullyConnected(size=500, activation=activation.tanh),
    FullyConnected(size=500, activation=activation.tanh),
    FullyConnected(size=500, activation=activation.tanh),
    FullyConnected(size=500, activation=activation.tanh),
    FullyConnected(size=10, activation=None, last_layer=True)
]
# NOTE(review): this chunk is truncated mid-call — the remaining Model(...)
# arguments continue past the end of this view.
model_bp = Model(layers=layers_bp, num_classes=10,
def train():
    """Load the model from "data.txt" and run 50 training iterations,
    placing graph construction on the first GPU."""
    with tf.device('/GPU:0'):
        net = Model("data.txt", max_size)
        net.load()
        net.train(50)
    # NOTE(review): this chunk begins at the tail of a training function whose
    # definition is above this view.
    # NOTE(review): 'validation_recall' returns the same `recall_history`
    # object as 'training_recall' — confirm a separate validation recall
    # history isn't being dropped.
    return {
        'training_loss': train_loss_history,
        'training_accuracy': train_accuracy_history,
        'training_recall': recall_history,
        'validation_loss': val_loss_history,
        'validation_accuracy': val_accuracy_history,
        'validation_recall': recall_history
    }


if __name__ == '__main__':
    # split name must equal to split filename eg: for train.txt -> train
    train_data = dataset.DocumentsDataset(split_name='train')
    val_data = dataset.DocumentsDataset(split_name='val')

    VOCAB_SIZE = len(train_data.vocab)

    training_data = data.DataLoader(train_data, batch_size=config.BATCH_SIZE, shuffle=True)
    validation_data = data.DataLoader(val_data, batch_size=config.BATCH_SIZE, shuffle=True)

    # Fresh model; swap in the commented torch.load line to resume from a
    # saved checkpoint instead.
    relie = Model(VOCAB_SIZE, config.EMBEDDING_SIZE, config.NEIGHBOURS, config.HEADS)
    # relie = torch.load('output/model.pth')

    history = train(relie, training_data, validation_data, config.EPOCHS)
    print(history)
# Single-frame placeholders: detection annotations (5 values per cell of a
# cell_size x cell_size grid) and the previous association record for
# record_N tracked targets.
det_anno = tf.placeholder(dtype=tf.float32, shape=(1, cell_size * cell_size * 5))
prev_asscoia = tf.placeholder(dtype=tf.float32,
                              shape=(1, record_N * (cell_size * cell_size + 1)),
                              name="prev_asscoia")

# Tracking bookkeeping carried across frames.
track_record = []
max_track_id = 0

# NOTE(review): `graph` is created but never entered via as_default() in this
# view, so the model below is built in the default graph — confirm intended.
graph = tf.Graph()
# img_input2 and h_state_init_1 are defined above this view.
mynet = Model(img_input2, det_anno, prev_asscoia, h_state_init_1, is_training=False, keep_prob=1, data_format=data_format)

isess.run(tf.global_variables_initializer())
# Restore model.
saver = tf.train.Saver()
saver.restore(isess, chkpt_file)


def feature_decode(example):
    """Extract fields from one serialized example record.

    NOTE(review): the body is truncated at the end of this chunk.
    """
    frame_id = example.features.feature['frame_id'].int64_list.value[0]