Example #1
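# Restore a trained model, run coreference prediction on one tensorized
# example, and write the input JSON back out with the predicted clusters
# and top spans attached.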
def pred(indata, out):
    config = util.initialize_from_env()
    model = util.get_model(config)

    with tf.Session() as session:
        model.restore(session)

        with open(out, "w") as output_file:
            tensorized_example = model.tensorize_example(indata,
                                                         is_training=False)
            feed_dict = {
                i: t
                for i, t in zip(model.input_tensors, tensorized_example)
            }
            _, _, _, top_span_starts, top_span_ends, top_antecedents, top_antecedent_scores = session.run(
                model.predictions, feed_dict=feed_dict)
            predicted_antecedents = model.get_predicted_antecedents(
                top_antecedents, top_antecedent_scores)
            indata["predicted_clusters"], _ = model.get_predicted_clusters(
                top_span_starts, top_span_ends, predicted_antecedents)
            indata["top_spans"] = list(
                zip((int(i) for i in top_span_starts),
                    (int(i) for i in top_span_ends)))
            indata['head_scores'] = []

            output_file.write(json.dumps(indata))
            output_file.write("\n")
Example #2
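# Restore every checkpoint variable except the policy-gradient reward ones,
# then return the current values of all trainable variables as numpy arrays.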
def load_model():
    config = util.initialize_from_env()
    log_dir = config['log_dir']
    model = cm.CorefModel(config)
    with tf.Session() as session:
        vars_to_restore = [v for v in tf.global_variables() if 'pg_reward' not in v.name]
        saver = tf.train.Saver(vars_to_restore)
        saver.restore(session, os.path.join(log_dir, "model.max.ckpt"))
        all_vars = tf.trainable_variables()
        values = session.run(all_vars)
        return values
Example #3
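# Evaluate a single model on the given eval_path; an optional third CLI
# argument names the file that predictions are written to.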
def run_1model(eval_path):

    outfpath = sys.argv[3] if len(sys.argv) == 4 else None

    sys.argv = sys.argv[:2]
    args = util.get_args()
    config = util.initialize_from_env(args.experiment, args.logdir)
    config['eval_path'] = eval_path

    model = cm.CorefModel(config, eval_mode=True)
    with tf.Session() as session:
        model.restore(session, args.latest_checkpoint)
        model.evaluate(session,
                       official_stdout=True,
                       pprint=False,
                       test=True,
                       outfpath=outfpath)
Example #4
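# TPU entry point: resolve the TPU cluster, build a TPUEstimator from the
# experiment config, and train for num_docs * num_epochs steps.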
def main(_):
    config = util.initialize_from_env(use_tpu=FLAGS.use_tpu)

    tf.logging.set_verbosity(tf.logging.INFO)

    num_train_steps = config["num_docs"] * config["num_epochs"]
    # use_tpu = FLAGS.use_tpu
    if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_predict:
        raise ValueError(
            "At least one of `do_train`, `do_eval` or `do_predict' must be True."
        )

    tf.gfile.MakeDirs(FLAGS.output_dir)
    tpu_cluster_resolver = None
    if FLAGS.use_tpu and FLAGS.tpu_name:
        tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
            FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
        tf.config.experimental_connect_to_cluster(tpu_cluster_resolver)
        tf.tpu.experimental.initialize_tpu_system(tpu_cluster_resolver)

    is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
    run_config = tf.contrib.tpu.RunConfig(
        cluster=tpu_cluster_resolver,
        master=FLAGS.master,
        model_dir=FLAGS.output_dir,
        save_checkpoints_steps=config["save_checkpoints_steps"],
        tpu_config=tf.contrib.tpu.TPUConfig(
            iterations_per_loop=FLAGS.iterations_per_loop,
            num_shards=FLAGS.num_tpu_cores,
            per_host_input_for_training=is_per_host))

    model_fn = model_fn_builder(config)
    estimator = tf.contrib.tpu.TPUEstimator(use_tpu=FLAGS.use_tpu,
                                            model_fn=model_fn,
                                            config=run_config,
                                            train_batch_size=1,
                                            predict_batch_size=1)

    seq_length = config["max_segment_len"] * config["max_training_sentences"]

    if FLAGS.do_train:
        estimator.train(
            input_fn=file_based_input_fn_builder(config["train_path"],
                                                 seq_length,
                                                 config,
                                                 is_training=True,
                                                 drop_remainder=True),
            max_steps=num_train_steps)
Example #5
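# Training setup for VisCoref: mirror stdout/stderr to a log file through
# `tee`, then build the model, saver, and summary writer from the config.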
import os
import subprocess
import sys

import tensorflow as tf
from model import VisCoref
import util


def set_log_file(fname):
    tee = subprocess.Popen(['tee', fname], stdin=subprocess.PIPE)
    os.dup2(tee.stdin.fileno(), sys.stdout.fileno())
    os.dup2(tee.stdin.fileno(), sys.stderr.fileno())


if __name__ == "__main__":
    config = util.initialize_from_env()

    log_dir = config["log_dir"]
    writer = tf.summary.FileWriter(log_dir, flush_secs=20)
    log_file = os.path.join(log_dir, 'train.log')
    set_log_file(log_file)

    report_frequency = config["report_frequency"]
    eval_frequency = config["eval_frequency"]

    tf.set_random_seed(config['random_seed'])

    model = VisCoref(config)
    saver = tf.train.Saver()

    max_f1 = 0
Example #6
def create_tokenizer_from_hub_module(bert_model_hub, sess):
    # Reconstructed head (the snippet began mid-body); assumes tensorflow_hub
    # is imported as `hub` and BERT's `tokenization` module is available.
    bert_module = hub.Module(bert_model_hub)
    tokenization_info = bert_module(signature="tokenization_info", as_dict=True)
    vocab_file, do_lower_case = sess.run([tokenization_info["vocab_file"],
                                          tokenization_info["do_lower_case"]])
    return tokenization.FullTokenizer(
        vocab_file=vocab_file, do_lower_case=do_lower_case)



BERT_MODEL_HUB = "https://tfhub.dev/google/bert_uncased_L-12_H-768_A-12/1"

session = tf.Session()
tokenizer = create_tokenizer_from_hub_module(BERT_MODEL_HUB, session)



config = util.initialize_from_env("experiments4.conf")
report_frequency = 100
eval_frequency = 5000

model = cm.CorefModel(config, tokenizer, True)

saver = tf.train.Saver()

log_dir = 'logs/' + name
writer = tf.summary.FileWriter(log_dir, flush_secs=20)

max_f1 = 0


session.run(tf.global_variables_initializer())
# model.start_enqueue_thread(session)
Example #7
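    # `model_fn` for a TPUEstimator mention-proposal model: initialize weights
    # from a checkpoint, then return a train or predict spec depending on mode.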
    def model_fn(features, labels, mode, params):  # pylint: disable=unused-argument
        """The `model_fn` for TPUEstimator."""
        config = util.initialize_from_env(use_tpu=FLAGS.use_tpu)

        tmp_features = {}
        input_ids = features["flattened_input_ids"]
        input_mask = features["flattened_input_mask"]
        text_len = features["text_len"]

        speaker_ids = features["speaker_ids"]
        genre = features["genre"]
        gold_starts = features["span_starts"]
        gold_ends = features["span_ends"]
        cluster_ids = features["cluster_ids"]
        sentence_map = features["sentence_map"]
        span_mention = features["span_mention"]

        tmp_features["input_ids"] = input_ids
        tmp_features["input_mask"] = input_mask
        tmp_features["text_len"] = text_len
        tmp_features["speaker_ids"] = speaker_ids
        tmp_features["genre"] = genre
        tmp_features["gold_starts"] = gold_starts
        tmp_features["gold_ends"] = gold_ends
        tmp_features["speaker_ids"] = speaker_ids
        tmp_features["cluster_ids"] = cluster_ids
        tmp_features["sentence_map"] = sentence_map
        tmp_features["span_mention"] = span_mention

        tf.logging.info("********* Features *********")
        for name in sorted(tmp_features.keys()):
            tf.logging.info("  name = %s, shape = %s" %
                            (name, tmp_features[name].shape))

        is_training = (mode == tf.estimator.ModeKeys.TRAIN)

        model = util.get_model(config, model_sign="mention_proposal")

        tvars = tf.trainable_variables()
        # If you're using TF weights only, tf_checkpoint and init_checkpoint can be the same
        # Get the assignment map from the tensorflow checkpoint.
        # Depending on the extension, use TF/Pytorch to load weights.
        assignment_map, initialized_variable_names = get_assignment_map_from_checkpoint(
            tvars, config['tf_checkpoint'])
        init_from_checkpoint = tf.train.init_from_checkpoint  # if config['init_checkpoint'].endswith('ckpt') # else load_from_pytorch_checkpoint

        if FLAGS.use_tpu:

            def tpu_scaffold():
                init_from_checkpoint(config['init_checkpoint'], assignment_map)
                return tf.train.Scaffold()

            scaffold_fn = tpu_scaffold
        else:
            init_from_checkpoint(config['init_checkpoint'], assignment_map)
            scaffold_fn = None

        tf.logging.info("**** Trainable Variables ****")
        for var in tvars:
            init_string = ""
            if var.name in initialized_variable_names:
                init_string = ", *INIT_FROM_CKPT*"
            tf.logging.info("  name = %s, shape = %s%s", var.name, var.shape,
                            init_string)

        if is_training:
            tf.logging.info(
                "****************************** Training On TPU ******************************"
            )
            total_loss, start_scores, end_scores, span_scores = model.get_mention_proposal_and_loss(
                input_ids, input_mask, text_len, speaker_ids, genre, is_training,
                gold_starts, gold_ends, cluster_ids, sentence_map,
                span_mention=span_mention)

            if config["device"] == "tpu":
                tf.logging.info("  name = %s, shape = %s" %
                                (name, features[name].shape))
                optimizer = tf.train.AdamOptimizer(
                    learning_rate=config['bert_learning_rate'],
                    beta1=0.9,
                    beta2=0.999,
                    epsilon=1e-08)
                optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)
                train_op = optimizer.minimize(total_loss,
                                              tf.train.get_global_step())
            else:
                optimizer = RAdam(learning_rate=config['bert_learning_rate'],
                                  epsilon=1e-8,
                                  beta1=0.9,
                                  beta2=0.999)
                train_op = optimizer.minimize(total_loss,
                                              tf.train.get_global_step())

            # logging_hook = tf.train.LoggingTensorHook({"loss": total_loss}, every_n_iter=1)
            output_spec = tf.contrib.tpu.TPUEstimatorSpec(
                mode=mode,
                loss=total_loss,
                train_op=train_op,
                scaffold_fn=scaffold_fn)
        else:
            total_loss, start_scores, end_scores, span_scores = model.get_mention_proposal_and_loss(
                input_ids, input_mask, text_len, speaker_ids, genre, is_training,
                gold_starts, gold_ends, cluster_ids, sentence_map, span_mention)

            predictions = {
                "total_loss": total_loss,
                "start_scores": start_scores,
                "end_scores": end_scores,
                "span_scores": span_scores
            }

            output_spec = tf.contrib.tpu.TPUEstimatorSpec(
                mode=tf.estimator.ModeKeys.PREDICT,
                predictions=predictions,
                scaffold_fn=scaffold_fn)

        return output_spec
Example #8
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf

import util
import coref_model as cm
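
# Evaluate a trained model on the English test set with gold CoNLL
# annotations and the official scorer output.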

if __name__ == "__main__":
  args = util.get_args()
  config = util.initialize_from_env(args.experiment, args.logdir)
  config["eval_path"] = "test.english.jsonlines"
  config["conll_eval_path"] = "test.english.v4_gold_conll"
  config["context_embeddings"]["path"] = "glove.840B.300d.txt"

  model = cm.CorefModel(config, eval_mode=True)
  with tf.Session() as session:
    model.restore(session, args.latest_checkpoint)
    model.evaluate(session, official_stdout=True, pprint=False, test=True)
Example #9
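  # Fragment: CLI options for evaluation with an optional RSA speaker model;
  # when --use_l1 is set, an RNN- or GPT-based s0 model is loaded.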
  parser.add_argument("name")
  parser.add_argument("--from_npy", type=str)
  parser.add_argument("--use_l1", action="store_true")
  parser.add_argument("--s0_model_type", type=str)
  parser.add_argument("--s0_model_path", type=str)
  parser.add_argument("--batch_size", type=int, default=32)
  parser.add_argument("--max_segment_len", type=int, default=512)
  parser.add_argument("--max_num_ctxs_in_batch", type=int, default=8)
  parser.add_argument("--anteced_top_k", type=int, default=5)
  parser.add_argument("--to_npy", type=str)

  args = parser.parse_args()
  # finish adding arguments

  config = util.initialize_from_env(name=args.name)
  model = util.get_model(config)
  saver = tf.train.Saver()
  log_dir = config["log_dir"]

  device = torch.device("cuda" if "GPU" in os.environ and torch.cuda.is_available() else "cpu")

  if args.use_l1:
    if args.s0_model_type in ["rnn", "RNN"]:
      rsa_model = RNNSpeakerRSAModel(args.s0_model_path, args.batch_size,
                                     args.max_segment_len,
                                     args.anteced_top_k,
                                     args.max_num_ctxs_in_batch,
                                     device,
                                     logger=None)
    if args.s0_model_type in ["gpt", "GPT"]:
Example #10
    return speaker_dict


if __name__ == '__main__':
    # python3 build_data_to_tfrecord.py
    data_sign = "test"
    for sliding_window_size in [128]:
        print("=*=" * 20)
        print("current sliding window size is : {}".format(
            str(sliding_window_size)))
        print("=*=" * 20)
        for data_sign in ["train", "dev", "test"]:
            print("%*%" * 20)
            print(data_sign)
            print("%*%" * 20)
            config = util.initialize_from_env(use_tpu=False)
            language = "english"
            vocab_file = "/xiaoya/pretrain_ckpt/spanbert_base_cased/vocab.txt"
            input_data_dir = "/xiaoya/data"

            input_filename = "{}.english.{}.jsonlines".format(
                data_sign, str(sliding_window_size))

            output_data_dir = "/xiaoya/tpu_data/mention_proposal/span_all_{}_{}".format(
                str(sliding_window_size),
                str(config["max_training_sentences"]))
            os.makedirs(output_data_dir, exist_ok=True)
            output_filename = "{}.english.jsonlines".format(data_sign)
            print("$^$" * 30)
            print(output_data_dir, output_filename)
            print("$^$" * 30)
Example #11
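# Full PyTorch training loop: seed everything, build CoNLL dataloaders with a
# BERT tokenizer, train with separate BERT/task optimizers (linear warmup then
# polynomial decay), evaluate on dev periodically, and save the best model.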
def train():
    config = util.initialize_from_env()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    random.seed(config['seed'])
    np.random.seed(config['seed'])
    torch.manual_seed(config['seed'])
    torch.cuda.manual_seed_all(config['seed'])
    torch.backends.cudnn.deterministic = True

    tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
    dataloader = CoNLLDataLoader(config, tokenizer, mode='train')
    train_dataloader = dataloader.get_dataloader(data_sign='train')
    eval_dataloader = dataloader.get_dataloader(data_sign='eval')
    test_dataloader = dataloader.get_dataloader(data_sign='test')

    fh = logging.FileHandler(os.path.join(config['log_dir'], 'coref_log.txt'), mode='w')
    fh.setFormatter(logging.Formatter(format))
    logger.addHandler(fh)
    log_dir = config['log_dir']

    best_dev_f1, best_dev_pre, best_dev_recall = 0.0, 0.0, 0.0
    test_f1_when_dev_best, test_prec_when_dev_best, test_rec_when_dev_best = 0.0, 0.0, 0.0

    model = CorefModel(config)
    bert_optimizer, task_optimizer = build_optimizer(model, config)

    num_train_steps = int(config['num_docs'] * config['num_epochs'])
    num_warmup_steps = int(num_train_steps * 0.1)
    bert_poly_decay_scheduler = PolynomialLRDecay(optimizer=bert_optimizer,
                                                  max_decay_steps=num_train_steps,
                                                  end_learning_rate=0.0,
                                                  power=1.0)
    task_poly_decay_scheduler = PolynomialLRDecay(optimizer=task_optimizer,
                                                  max_decay_steps=num_train_steps,
                                                  end_learning_rate=0.0,
                                                  power=1.0)

    step = 0
    report_frequency = config["report_frequency"]
    eval_frequency = config["eval_frequency"]
    writer = SummaryWriter(log_dir=log_dir)
    accumulated_loss = 0.0
    model.to(device)
    model.train()
    for epoch in range(config['num_epochs']):
        logger.info("=*=" * 20)
        logger.info("start {} Epoch ... ".format(str(epoch)))

        for i, batch in enumerate(train_dataloader):
            input_ids, input_mask, text_len, speaker_ids, genre, gold_starts, gold_ends, cluster_ids, sentence_map, \
            subtoken_map = [b.to(device) for b in batch[1:]]
            bert_optimizer.zero_grad()
            task_optimizer.zero_grad()
            _, loss = model(input_ids, input_mask, text_len, speaker_ids, genre, gold_starts, gold_ends, cluster_ids,
                            sentence_map, subtoken_map)
            accumulated_loss += loss.item()
            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
            bert_optimizer.step()
            task_optimizer.step()

            if step > 0 and step % report_frequency == 0:
                average_loss = accumulated_loss / report_frequency
                logger.info("[{}] loss={:.2f}".format(step, average_loss))
                writer.add_scalar('Loss', average_loss, step)
                accumulated_loss = 0.0

            if step > 0 and step % eval_frequency == 0:
                coref_p, coref_r, coref_f = evaluate(model, eval_dataloader, config['eval_path'], device)
                logger.info("***** EVAL ON DEV SET *****")
                logger.info(
                    "***** [DEV EVAL COREF] ***** : precision: {:.2f}, recall: {:.2f}, f1: {:.2f}".format(coref_p * 100,
                                                                                                          coref_r * 100,
                                                                                                          coref_f * 100))
                writer.add_scalar('Dev/F1', coref_f, step)
                writer.add_scalar('Dev/Precision', coref_p, step)
                writer.add_scalar('Dev/Recall', coref_r, step)
                if coref_f > best_dev_f1:
                    best_dev_f1 = coref_f
                    best_dev_pre = coref_p
                    best_dev_recall = coref_r
                    test_coref_p, test_coref_r, test_coref_f = \
                        evaluate(model, test_dataloader, config['test_path'], device)
                    test_f1_when_dev_best, test_prec_when_dev_best, test_rec_when_dev_best = test_coref_f, \
                                                                                             test_coref_p, \
                                                                                             test_coref_r
                    logger.info("***** EVAL ON TEST SET *****")
                    logger.info(
                        "***** [TEST EVAL COREF] ***** : precision: {:.2f}, recall: {:.2f}, f1: {:.2f}".format(
                            test_coref_p * 100,
                            test_coref_r * 100,
                            test_coref_f * 100))

                    logger.info("***** SAVE MODEL *****")
                    model_to_save = model.module if hasattr(model, 'module') else model
                    torch.save(model_to_save.state_dict(), os.path.join(log_dir, "model_best.checkpoint"))
                    writer.add_scalar('Best/dev/f1', best_dev_f1, step)
                    writer.add_scalar('Best/dev/p', best_dev_pre, step)
                    writer.add_scalar('Best/dev/r', best_dev_recall, step)
                    writer.add_scalar('Best/test/f1', test_coref_f, step)
                    writer.add_scalar('Best/test/p', test_coref_p, step)
                    writer.add_scalar('Best/test/r', test_coref_r, step)

                model.train()

            step += 1
            if step < num_warmup_steps:
                bert_lr = warmup_linear(bert_optimizer, config, step + 1, num_warmup_steps)
            else:
                bert_poly_decay_scheduler.step(step)
                bert_lr = bert_poly_decay_scheduler.get_last_lr()[0]
            task_poly_decay_scheduler.step()
            task_lr = task_poly_decay_scheduler.get_last_lr()[0]
            writer.add_scalar('Bert Learning Rate', bert_lr, step)
            writer.add_scalar('Task Learning Rate', task_lr, step)

    logger.info("*" * 20)
    logger.info(
        "- @@@@@ BEST DEV F1 : {:.2f}, Precision : {:.2f}, Recall : {:.2f},".format(best_dev_f1 * 100,
                                                                                    best_dev_pre * 100,
                                                                                    best_dev_recall * 100))
    logger.info("- @@@@@ TEST when DEV best F1 : {:.2f}, Precision : {:.2f}, Recall : {:.2f},".format(
        test_f1_when_dev_best * 100, test_prec_when_dev_best * 100, test_rec_when_dev_best * 100))
Example #12
                        help="Input file in .jsonlines format")
    parser.add_argument(
        "--output_path",
        type=str,
        help="Predictions will be written to this file in .jsonlines format.")
    parser.add_argument("--npy_output_path",
                        type=str,
                        help="Output npy pickle file with model scores")
    args = parser.parse_args()

    model_name = args.model_name
    input_filename = args.input_path
    output_filename = args.output_path
    npy_output_filename = args.npy_output_path

    config = util.initialize_from_env(name=model_name)
    log_dir = config["log_dir"]

    model = util.get_model(config)
    saver = tf.train.Saver()

    k_sum = 0
    c_sum = 0
    count = 0

    if npy_output_filename:
        data_dicts = []

    with tf.Session() as session:
        model.restore(session)
Example #13
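# Evaluate with the official CoNLL scorer; an optional second CLI argument
# selects the experiments config file.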
import os

import tensorflow as tf
import util
import sys


def read_doc_keys(fname):
    keys = set()
    with open(fname) as f:
        for line in f:
            keys.add(line.strip())
    return keys


if __name__ == "__main__":
    # config = util.initialize_from_env(eval_test=True)

    experiments_conf = "experiments.conf"
    if len(sys.argv) > 2:
        experiments_conf = sys.argv[2]
    config = util.initialize_from_env(eval_test=experiments_conf)
    model = util.get_model(config)
    saver = tf.train.Saver()
    log_dir = config["log_dir"]
    with tf.Session() as session:
        model.restore(session)
        # Make sure eval mode is True if you want official conll results
        model.evaluate(session, official_stdout=True, eval_mode=True)
Example #14
    subprocess.run(args, check=True)


if __name__ == "__main__":

    two_models = "," in sys.argv[1]
    input_filename = sys.argv[2]
    output_filename = sys.argv[3]

    if two_models:
        exp1, exp2 = sys.argv[1].split(",")
        run_2models(
            exp1,
            exp2,
            input_filename=input_filename,
            output_filename=output_filename,
        )
    else:
        config = util.initialize_from_env(sys.argv[1])
        must_bertify = "--no-bertify" not in sys.argv
        cluster_key = "clusters" if "--no-predicted" in sys.argv else "predicted_clusters"
        config['lm_path'] = "bert_features_predict.hdf5"
        if must_bertify:
            bertify(input_filename, config['lm_path'], config)
        run_1model(
            config=config,
            input_filename=input_filename,
            output_filename=output_filename,
            cluster_key=cluster_key,
        )
Example #15
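# Evaluate a specific checkpoint on the test set, selected by name_suffix.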
logging.basicConfig(format=format)
logger = logging.getLogger()
logger.setLevel(logging.INFO)


def read_doc_keys(fname):
    keys = set()
    with open(fname) as f:
        for line in f:
            keys.add(line.strip())
    return keys


if __name__ == "__main__":
    # Eval dev
    # config = util.initialize_from_env()
    # model = util.get_model(config)
    # saver = tf.train.Saver()
    # with tf.Session() as session:
    #   model.restore(session)
    #   # Make sure eval mode is True if you want official conll results
    #   model.evaluate(session, official_stdout=True, eval_mode=True, visualize=False)

    # Eval test
    config = util.initialize_from_env(name_suffix='May14_06-02-15')
    model = util.get_model(config)
    saver = tf.train.Saver()
    with tf.Session() as session:
        model.restore(session)
        model.evaluate_test(session, official_stdout=True, eval_mode=True)
Example #16
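# TF training loop: resume from the latest checkpoint when available, log the
# average loss every report_frequency steps, and keep the best checkpoint by
# eval F1 in model.max.ckpt.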
def main():
    config = util.initialize_from_env()

    report_frequency = config["report_frequency"]
    eval_frequency = config["eval_frequency"]

    model = util.get_model(config)
    saver = tf.train.Saver()

    log_dir = config["log_dir"]
    max_steps = config['num_epochs'] * config['num_docs']
    writer = tf.summary.FileWriter(log_dir, flush_secs=20)

    max_f1 = 0
    mode = 'w'

    with tf.Session() as session:
        session.run(tf.global_variables_initializer())
        model.start_enqueue_thread(session)
        accumulated_loss = 0.0

        initial_step = 0
        ckpt = tf.train.get_checkpoint_state(log_dir)
        if ckpt and ckpt.model_checkpoint_path:
            print("Restoring from: {}".format(ckpt.model_checkpoint_path))
            saver.restore(session, ckpt.model_checkpoint_path)
            mode = 'a'
            initial_step = int(
                os.path.basename(ckpt.model_checkpoint_path).split('-')[1])
        fh = logging.FileHandler(os.path.join(log_dir, 'stdout.log'),
                                 mode=mode)
        fh.setFormatter(logging.Formatter(format))
        logger.addHandler(fh)

        initial_time = time.time()
        while True:
            tf_loss, tf_global_step, _ = session.run(
                [model.loss, model.global_step, model.train_op])
            accumulated_loss += tf_loss
            # print('tf global_step', tf_global_step)

            if tf_global_step % report_frequency == 0:
                steps_per_second = (tf_global_step - initial_step) / (
                    time.time() - initial_time)

                average_loss = accumulated_loss / report_frequency
                logger.info("[{}] loss={:.2f}, steps/s={:.2f}".format(
                    tf_global_step, average_loss, steps_per_second))
                writer.add_summary(util.make_summary({"loss": average_loss}),
                                   tf_global_step)
                accumulated_loss = 0.0

            if tf_global_step % eval_frequency == 0:
                eval_summary, eval_f1 = model.evaluate(session)

                if eval_f1 > max_f1:
                    max_f1 = eval_f1
                    saver.save(session,
                               os.path.join(log_dir, "model"),
                               global_step=tf_global_step)
                    util.copy_checkpoint(
                        os.path.join(log_dir,
                                     "model-{}".format(tf_global_step)),
                        os.path.join(log_dir, "model.max.ckpt"))

                writer.add_summary(eval_summary, tf_global_step)
                writer.add_summary(util.make_summary({"max_eval_f1": max_f1}),
                                   tf_global_step)

                logger.info("[{}] evaL_f1={:.4f}, max_f1={:.4f}".format(
                    tf_global_step, eval_f1, max_f1))
                if tf_global_step > max_steps:
                    break