def train_trigger(self):
    # Train the trigger-detection stage, keeping the checkpoint that scores
    # best on dev and reporting the corresponding test scores at the end.
    train, dev, test = self.t_train, self.t_dev, self.t_test
    saver = tf.train.Saver()
    maxlen = self.maxlen
    print('--Training Trigger--')

    from tqdm import tqdm  # progress bar over epochs; usually imported at module level

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        devbest = 0           # best dev F1 seen so far
        testbest = (0, 0, 0)  # test (P, R, F1) at the best-dev epoch

        for epoch in tqdm(range(constant.t_epoch)):
            # One training pass over the trigger data.
            loss_list = []
            for batch in get_batch(train, constant.t_batch_size, True):
                loss, _ = sess.run(
                    [self.loss, self.train_op],
                    feed_dict=get_trigger_feeddict(self, batch, self.stage, maxlen))
                loss_list.append(loss)
            print('epoch:{}'.format(epoch))
            print('loss:', np.mean(loss_list))

            # Evaluate on dev.
            pred_labels = []
            for batch in get_batch(dev, constant.t_batch_size, False):
                pred_label = sess.run(
                    self.pred_label,
                    feed_dict=get_trigger_feeddict(self, batch, self.stage, maxlen, is_train=False))
                pred_labels.extend(list(pred_label))
            golds = list(dev[0][4])
            dev_p, dev_r, dev_f = f_score(pred_labels, golds)
            print("dev_Precision: {} dev_Recall:{} dev_F1:{}".format(dev_p, dev_r, dev_f))

            # Evaluate on test.
            pred_labels = []
            for batch in get_batch(test, constant.t_batch_size, False):
                pred_label = sess.run(
                    self.pred_label,
                    feed_dict=get_trigger_feeddict(self, batch, self.stage, maxlen, is_train=False))
                pred_labels.extend(list(pred_label))
            golds = list(test[0][4])
            test_p, test_r, test_f = f_score(pred_labels, golds)
            print("test_Precision: {} test_Recall:{} test_F1:{}\n".format(test_p, test_r, test_f))

            # Checkpoint whenever dev F1 improves.
            if dev_f > devbest:
                devbest = dev_f
                testbest = (test_p, test_r, test_f)
                saver.save(sess, "saved_models/trigger.ckpt")

        test_p, test_r, test_f = testbest
        print("test best Precision: {} test best Recall:{} test best F1:{}".format(
            test_p, test_r, test_f))
def train_argument(self):
    # Train the argument-classification stage; track the best dev F1 and
    # report the test scores from that epoch (no checkpoint is saved here).
    print('--Training Argument--')
    train, dev, test = self.a_train, self.a_dev, self.a_test

    with tf.Session() as sess:
        devbest = 0           # best dev F1 seen so far
        testbest = (0, 0, 0)  # test (P, R, F1) at the best-dev epoch
        sess.run(tf.global_variables_initializer())

        for epoch in range(constant.a_epoch):
            # One training pass over the argument data.
            loss_list = []
            for batch in get_batch(train, constant.a_batch_size, shuffle=True):
                loss, _ = sess.run(
                    [self.loss, self.train_op],
                    feed_dict=get_argument_feeddict(self, batch, True, "argument"))
                loss_list.append(loss)
            print('epoch:{}'.format(epoch))
            print('loss:', np.mean(loss_list))

            # Evaluate on dev: predictions are scored as (event type, role label) pairs.
            pred_labels = []
            for batch in get_batch(dev, constant.a_batch_size, False):
                pred_event_types, feed_dict = get_argument_feeddict(self, batch, False, "argument")
                pred_label = sess.run(self.pred_label, feed_dict=feed_dict)
                pred_labels.extend(list(zip(list(pred_event_types), list(pred_label))))
            golds = list(zip(list(dev[1]), list(dev[2])))
            dev_p, dev_r, dev_f = f_score(pred_labels, golds, self.classify)
            print("dev_Precision: {} dev_Recall:{} dev_F1:{}".format(dev_p, dev_r, dev_f))

            # Evaluate on test.
            pred_labels = []
            for batch in get_batch(test, constant.a_batch_size, False):
                pred_event_types, feed_dict = get_argument_feeddict(self, batch, False, "argument")
                pred_label = sess.run(self.pred_label, feed_dict=feed_dict)
                pred_labels.extend(list(zip(list(pred_event_types), list(pred_label))))
            golds = list(zip(list(test[1]), list(test[2])))
            test_p, test_r, test_f = f_score(pred_labels, golds, self.classify)
            print("test_Precision: {} test_Recall:{} test_F1:{}\n".format(test_p, test_r, test_f))

            # Remember the test scores from the best-dev epoch.
            if dev_f > devbest:
                devbest = dev_f
                testbest = (test_p, test_r, test_f)

        test_p, test_r, test_f = testbest
        print("test best Precision: {} test best Recall:{} test best F1:{}".format(
            test_p, test_r, test_f))
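# --- Hypothetical sketch, not part of the original codebase. ---
# Both training loops above rely on a `get_batch(data, batch_size, shuffle)` helper
# that yields mini-batches from a split; the real implementation lives elsewhere in
# this repo, and the exact field layout differs between the trigger and argument
# splits. The version below only illustrates the assumed contract: slice a tuple of
# equally long, parallel fields into batches, shuffling the order when asked.
import numpy as np


def _get_batch_sketch(data, batch_size, shuffle=False):
    """Yield mini-batches from a tuple of equally long, parallel arrays."""
    n = len(data[0])
    order = np.random.permutation(n) if shuffle else np.arange(n)
    for start in range(0, n, batch_size):
        picked = order[start:start + batch_size]
        yield tuple(np.asarray(field)[picked] for field in data)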