Example #1
 def train_epoch(self, train_set, valid_set, epoch):
     """Runs one training epoch, then writes a validation summary."""
     num_batches = len(train_set)
     prog = Progbar(target=num_batches)  # Keras-style progress bar
     cur_step = (epoch - 1) * num_batches  # global step entering this epoch
     for i, batch_data in enumerate(train_set):
         feed_dict = self._get_feed_dict(batch_data,
                                         is_train=True,
                                         keep_prob=self.cfg["keep_prob"],
                                         lr=self.cfg["lr"])
         _, train_loss, summary = self.sess.run(
             [self.train_op, self.loss, self.summary], feed_dict=feed_dict)
         cur_step = (epoch - 1) * num_batches + (i + 1)
         prog.update(i + 1, [("Global Step", int(cur_step)),
                             ("Train Loss", train_loss)])
         if i % 100 == 0:
             # log a training summary every 100 batches
             self.train_writer.add_summary(summary, cur_step)
     # run the summary op over the validation set and log the summary of the
     # last validation batch at the current global step
     val_summary = None
     for batch_data in valid_set:
         feed_dict = self._get_feed_dict(batch_data)
         val_summary = self.sess.run(self.summary, feed_dict=feed_dict)
     if val_summary is not None:
         self.test_writer.add_summary(val_summary, cur_step)
     micro_f_val, _, _ = self.evaluate_punct(valid_set, "val")
     # train_loss is the loss of the last training batch
     return micro_f_val, train_loss
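This method relies on a _get_feed_dict helper that is not shown. As a minimal sketch of what such a helper might look like, assuming the model exposes words, labels, is_train, keep_prob, and lr placeholders (all hypothetical names; the real attributes may differ):

 def _get_feed_dict(self, batch, is_train=False, keep_prob=1.0, lr=None):
     # Hypothetical placeholder names: map batch arrays and hyperparameters
     # onto the graph's placeholders. The defaults disable dropout and skip
     # the learning rate, matching the validation call above.
     feed_dict = {self.words: batch["words"],
                  self.labels: batch["labels"],
                  self.is_train: is_train,
                  self.keep_prob: keep_prob}
     if lr is not None:
         feed_dict[self.lr] = lr
     return feed_dict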
Example #2
import os

import numpy as np
import tensorflow as tf

# config, Model, Progbar, dataset_iterator, word_dict, verb_dict, and
# verb_vocab_count are assumed to be defined elsewhere in the project.
if not os.path.exists(config.ckpt):
    os.makedirs(config.ckpt)

# training the model
print("start training...")
sess_config = tf.ConfigProto(allow_soft_placement=True)
sess_config.gpu_options.allow_growth = True  # claim GPU memory on demand
with tf.Session(config=sess_config) as sess:
    # build model
    print("build model...")
    model = Model(config, verb_vocab_count)
    sess.run(tf.global_variables_initializer())
    saver = tf.train.Saver(max_to_keep=1)  # keep only the latest checkpoint
    for epoch in range(config.epochs):
        # 2.663e8 appears to be the hard-coded dataset size; integer division
        # keeps Progbar's target an int rather than a float
        prog = Progbar(target=int(2.663e8) // config.batch_size)
        for i, data in enumerate(
                dataset_iterator(config.dataset, word_dict, verb_dict,
                                 config.batch_size)):
            feed_dict = model.get_feed_dict(data, is_train=True, lr=config.lr)
            _, losses = sess.run([model.train_op, model.loss],
                                 feed_dict=feed_dict)
            prog.update(i + 1, [("train loss", losses)])
    # save the model once, after the final epoch
    saver.save(sess,
               config.ckpt + config.model_name,
               global_step=config.epochs)
    # save the trained target (verb) embedding matrix
    target_emb = sess.run(model.verb_embeddings)
    np.savez_compressed("data/trained_target_emb.npz", embeddings=target_emb)
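For completeness, the archive written by np.savez_compressed above can later be reloaded with NumPy; the "embeddings" key matches the keyword argument used when saving (the shape comment is an assumption about the model):

import numpy as np

# Reload the trained verb embeddings saved by the training script above.
data = np.load("data/trained_target_emb.npz")
target_emb = data["embeddings"]  # assumed shape: (verb_vocab_size, embed_dim)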