def train(self, passes, new_training=True):
    with tf.Session() as sess:
        # a decayed learning rate was tried and left disabled:
        # learning_rate = tf.train.exponential_decay(0.001, global_step, 200, 0.8, staircase=True)
        training = tf.train.AdamOptimizer(self.config.learning_rate).minimize(self.loss)

        if new_training:
            saver, global_step = Model.start_new_session(sess)
        else:
            saver, global_step = Model.continue_previous_session(
                sess, model_file='cnn',
                ckpt_file=self.config.root + '/event_detect/saver/cnn/checkpoint')

        # streaming metrics (recall/precision) keep their counters in local variables
        sess.run(tf.local_variables_initializer())
        self.train_writer.add_graph(sess.graph, global_step=global_step)

        test_result = []
        for step in range(1 + global_step, 1 + passes + global_step):
            inputs, target = self.reader.get_cnn_batch_data('train')
            summary, _, acc = sess.run(
                [self.merged, training, self.metrics['accuracy']],
                feed_dict={self.layer['input']: inputs,
                           self.layer['target']: target})
            self.train_writer.add_summary(summary, step)

            if step % 10 == 0:
                loss = sess.run(self.loss,
                                feed_dict={self.layer['input']: inputs,
                                           self.layer['target']: target})
                test_result.append(loss)
                print("global_step {}, training_loss {}, accuracy {}".format(step, loss, acc))

            if step % 100 == 0:
                test_x, test_y = self.reader.get_cnn_batch_data('test')
                acc, recall, precision = sess.run(
                    [self.metrics['accuracy'], self.metrics['recall'], self.metrics['precision']],
                    feed_dict={self.layer['input']: test_x,
                               self.layer['target']: test_y})
                print("test: accuracy {}, recall {}, precision {}".format(acc, recall, precision))
                saver.save(sess, self.config.root + '/event_detect/saver/cnn/cnn', global_step=step)
                print('checkpoint saved')

        print(test_result)
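# All four train() variants in this section rely on the same two checkpoint
# helpers on Model, which are defined elsewhere. The sketch below is an
# assumption about their behavior, inferred only from how they are called
# here (fresh runs start at step 0; resumed runs recover the step counter
# from the latest checkpoint) -- not the actual implementation.
import os

import tensorflow as tf


class Model:

    @staticmethod
    def start_new_session(sess):
        """Initialize all variables for a fresh run and return (saver, 0)."""
        saver = tf.train.Saver()
        sess.run(tf.global_variables_initializer())
        return saver, 0

    @staticmethod
    def continue_previous_session(sess, model_file, ckpt_file):
        """Restore the latest checkpoint and recover its global step."""
        # model_file is kept for signature compatibility; this sketch keys
        # off ckpt_file only
        saver = tf.train.Saver()
        ckpt = tf.train.get_checkpoint_state(os.path.dirname(ckpt_file))
        saver.restore(sess, ckpt.model_checkpoint_path)
        # saver.save(..., global_step=step) writes '<name>-<step>', so the
        # step count is recoverable from the checkpoint path suffix
        global_step = int(ckpt.model_checkpoint_path.rsplit('-', 1)[1])
        return saver, global_step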
def train(self, passes, new_training=True):
    with tf.Session() as sess:
        training = tf.train.AdamOptimizer(1e-3).minimize(self.loss)

        if new_training:
            saver, global_step = Model.start_new_session(sess)
        else:
            saver, global_step = Model.continue_previous_session(
                sess, model_file='cnn', ckpt_file='saver/cnn/checkpoint')

        # streaming metrics (recall/precision) keep their counters in local variables
        sess.run(tf.local_variables_initializer())
        self.train_writer.add_graph(sess.graph, global_step=global_step)

        for step in range(1 + global_step, 1 + passes + global_step):
            inputs, target = self.reader.get_cnn_batch_data('train')
            summary, _, acc = sess.run(
                [self.merged, training, self.metrics['accuracy']],
                feed_dict={self.layer['input']: inputs,
                           self.layer['target']: target})
            self.train_writer.add_summary(summary, step)

            if step % 10 == 0:
                loss = sess.run(self.loss,
                                feed_dict={self.layer['input']: inputs,
                                           self.layer['target']: target})
                print("global_step {}, training_loss {}, accuracy {}".format(step, loss, acc))

            if step % 100 == 0:
                test_x, test_y = self.reader.get_cnn_batch_data('test')
                acc, recall, precision = sess.run(
                    [self.metrics['accuracy'], self.metrics['recall'], self.metrics['precision']],
                    feed_dict={self.layer['input']: test_x,
                               self.layer['target']: test_y})
                print("test: accuracy {}, recall {}, precision {}".format(acc, recall, precision))
                saver.save(sess, 'saver/cnn/cnn', global_step=step)
                print('checkpoint saved')
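# Both CNN variants above run tf.local_variables_initializer() before the
# loop, which is consistent with self.metrics holding TF1 streaming metrics:
# those accumulate their counts in local variables. A minimal sketch of how
# such a dict could be built (build_metrics and its arguments are
# hypothetical, not taken from the original code):
def build_metrics(labels, predictions):
    _, accuracy = tf.metrics.accuracy(labels, predictions)
    _, recall = tf.metrics.recall(labels, predictions)
    _, precision = tf.metrics.precision(labels, predictions)
    # returning the update ops means every sess.run() on a metric folds the
    # current batch into the running totals before reporting the value
    return {'accuracy': accuracy, 'recall': recall, 'precision': precision}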
def train(self, passes, new_training=True):
    sess_config = tf.ConfigProto()
    sess_config.gpu_options.allow_growth = True
    with tf.Session(config=sess_config) as sess:
        if new_training:
            saver, global_step = Model.start_new_session(sess)
        else:
            saver, global_step = Model.continue_previous_session(
                sess, model_file='cblstm',
                ckpt_file='eqpickup/saver/cblstm/checkpoint')
        self.train_writer.add_graph(sess.graph, global_step=global_step)

        for step in range(1 + global_step, 1 + passes + global_step):
            with tf.variable_scope('Train'):
                input_, targets = self.reader.get_cblstm_batch_data(
                    'train', self.reader.pre_data_generator, self.reader.pre_validation_data)
                # pad variable-length sequences and record their true lengths
                input_, seq_len = self.data_padding_preprocess(input_, 'input')
                targets, _ = self.data_padding_preprocess(targets, 'targets')

                _, train_summary, loss, pred_seq = sess.run(
                    [self.train_op, self.train_merged, self.loss, self.layer['pred_seq']],
                    feed_dict={self.layer['input']: input_,
                               self.layer['targets']: targets,
                               self.layer['seq_len']: seq_len,
                               self.layer['keep_prob']:
                                   self.config.dl_tradition_model_config.cblstm_keep_prob})
                self.train_writer.add_summary(train_summary, step)

                # picking errors (mean and max) for the P and S phases, plus accuracy
                train_p_err, train_p_err_max, train_s_err, train_s_err_max = \
                    get_p_s_error(pred_seq, targets, seq_len)
                train_acc = get_acc(pred_seq, targets, seq_len)
                [train_metrics_summary] = sess.run(
                    [self.train_metrics_merged],
                    feed_dict={self.train_metrics['acc']: train_acc,
                               self.train_metrics['p_error']: train_p_err,
                               self.train_metrics['p_error_max']: train_p_err_max,
                               self.train_metrics['s_error']: train_s_err,
                               self.train_metrics['s_error_max']: train_s_err_max})
                self.train_writer.add_summary(train_metrics_summary, step)
                print("global_step {}, training_loss {}, accuracy {}, p_error {}, "
                      "p_err_max {}, s_error {}, s_err_max {}.".format(
                          step, loss, train_acc, train_p_err, train_p_err_max,
                          train_s_err, train_s_err_max))

            if step % 50 == 0:
                with tf.variable_scope('Test', reuse=True):
                    test_input, test_targets = self.reader.get_cblstm_batch_data(
                        'test', self.reader.pre_data_generator, self.reader.pre_validation_data)
                    test_input, test_seq_len = self.data_padding_preprocess(test_input, 'input')
                    test_targets, _ = self.data_padding_preprocess(test_targets, 'targets')
                    # disable dropout for evaluation
                    [test_pred_seq] = sess.run(
                        [self.layer['pred_seq']],
                        feed_dict={self.layer['input']: test_input,
                                   self.layer['seq_len']: test_seq_len,
                                   self.layer['keep_prob']: 1.0})
                    test_p_err, test_p_err_max, test_s_err, test_s_err_max = \
                        get_p_s_error(test_pred_seq, test_targets, test_seq_len)
                    test_acc = get_acc(test_pred_seq, test_targets, test_seq_len)
                    [test_metrics_summary] = sess.run(
                        [self.test_metrics_merged],
                        feed_dict={self.test_metrics['acc']: test_acc,
                                   self.test_metrics['p_error']: test_p_err,
                                   self.test_metrics['p_error_max']: test_p_err_max,
                                   self.test_metrics['s_error']: test_s_err,
                                   self.test_metrics['s_error_max']: test_s_err_max})
                    self.train_writer.add_summary(test_metrics_summary, step)
                    print("test_acc {}, test_p_err {}, test_p_err_max {}, "
                          "test_s_err {}, test_s_err_max {}.".format(
                              test_acc, test_p_err, test_p_err_max,
                              test_s_err, test_s_err_max))

                saver.save(sess, 'eqpickup/saver/cblstm/cblstm', global_step=step)
                print('checkpoint saved')
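# get_acc and get_p_s_error are defined elsewhere. Since batches are padded
# to a common length and the true lengths travel alongside them in seq_len,
# both are assumed to mask out padded time steps. A numpy sketch of the
# accuracy part under that assumption (the P/S picking-error computation is
# model-specific and not reconstructed here):
import numpy as np


def get_acc(pred_seq, targets, seq_len):
    """Fraction of correctly labeled time steps, ignoring padding."""
    correct, total = 0, 0
    for pred, target, length in zip(pred_seq, targets, seq_len):
        # compare only the first `length` labels of each sequence
        correct += int(np.sum(np.asarray(pred[:length]) == np.asarray(target[:length])))
        total += int(length)
    return correct / total if total else 0.0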
def train(self, passes, new_training=True):
    sess_config = tf.ConfigProto()
    sess_config.gpu_options.allow_growth = True
    sess_config.allow_soft_placement = True
    with tf.Session(config=sess_config) as sess:
        if new_training:
            saver, global_step = Model.start_new_session(sess)
        else:
            saver, global_step = Model.continue_previous_session(
                sess,
                model_file='model/saver/{}'.format(self.saveFile),
                ckpt_file='model/saver/{}/checkpoint'.format(self.saveFile))
        self.train_writer.add_graph(sess.graph, global_step=global_step)

        for step in range(1 + global_step, 1 + passes + global_step):
            with tf.variable_scope('Train'):
                # sample random-walk node sequences, their negatives, and labels
                walk_nodes = self.reader.nodes_walk_reader()
                neg_walk_nodes = [self.g.negative_sample(walk_nodes[i],
                                                         self.config.loss1_neg_sample_num,
                                                         self.g.nodes_degree_table)
                                  for i in range(len(walk_nodes))]
                neg_walk_nodes = np.array(neg_walk_nodes)

                walk_nodes_labels = list()
                for node_list in walk_nodes:
                    walk_nodes_labels.append(self.g.get_train_node_label(node_list))
                walk_nodes_labels = np.array(walk_nodes_labels)

                # warm-up: when pre-trained embeddings were loaded, use the
                # alternative train op for the first 200 steps
                if step < 200 and self.init_emb_file is not None:
                    train_op = self.train_op[1]
                else:
                    train_op = self.train_op[0]

                _, train_summary, loss = sess.run(
                    [train_op, self.loss_train_merged, self.layer['loss']],
                    feed_dict={self.layer['walk_nodes']: walk_nodes,
                               self.layer['walk_nodes_labels']: walk_nodes_labels,
                               self.layer['neg_walk_nodes']: neg_walk_nodes})
                self.train_writer.add_summary(train_summary, step)

            if step % 500 == 0 or step == 1:
                [node_emb, sup_emb] = sess.run([self.layer['emb'], self.layer['sup_emb']])
                node_emb = np.concatenate((node_emb, sup_emb), axis=1)
                print("global_step {}, loss {}".format(step, loss))

            if step % 1000 == 0 or step == 1:
                micro_f1, macro_f1 = self.multi_label_node_classification(node_emb)
                [test_summary] = sess.run(
                    [self.test_merged],
                    feed_dict={self.test_metrics['micro_f1']: micro_f1,
                               self.test_metrics['macro_f1']: macro_f1})
                print("micro_f1 {}, macro_f1 {}".format(micro_f1, macro_f1))
                self.train_writer.add_summary(test_summary, step)
                saver.save(sess, 'model/saver/{}/MPRSNE'.format(self.saveFile), global_step=step)
                print('checkpoint saved')
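# self.g.negative_sample draws negatives from nodes_degree_table, which by
# its name is assumed to be a degree-proportional noise table, as in
# word2vec-style negative sampling. A minimal sketch under that assumption;
# the real graph class may smooth the distribution or resolve collisions
# differently:
import numpy as np


def negative_sample(walk_nodes, num_samples, nodes_degree_table):
    """Draw num_samples nodes from the noise table, avoiding the walk itself."""
    positives = set(walk_nodes)
    samples = []
    while len(samples) < num_samples:
        # the table repeats each node id proportionally to its degree, so a
        # uniform index draw yields a degree-biased node sample
        candidate = nodes_degree_table[np.random.randint(len(nodes_degree_table))]
        if candidate not in positives:
            samples.append(candidate)
    return samples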