Example #1
def main(_):
    if not FLAGS.ner_data_path:
        raise ValueError("Must set --ner_data_path to the data directory")

    raw_data = reader.load_data(FLAGS.ner_data_path)
    # Unpack train/dev/test words and tags plus the shared vocabulary
    train_word, train_tag, dev_word, dev_tag, test_word, test_tag, vocabulary = raw_data

    config = get_config(FLAGS.ner_lang)

    eval_config = get_config(FLAGS.ner_lang)
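    # Evaluate one token at a time, independent of the training batch shape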
    eval_config.batch_size = 1
    eval_config.num_steps = 1

    with tf.Graph().as_default(), tf.Session() as session:
        initializer = tf.random_uniform_initializer(-config.init_scale,
                                                    config.init_scale)
        with tf.variable_scope(FLAGS.ner_scope_name,
                               reuse=None,
                               initializer=initializer):
            m = NERTagger(is_training=True, config=config)
        with tf.variable_scope(FLAGS.ner_scope_name,
                               reuse=True,
                               initializer=initializer):
            mvalid = NERTagger(is_training=False, config=config)
            mtest = NERTagger(is_training=False, config=eval_config)

        # CheckPoint State
        ckpt = tf.train.get_checkpoint_state(FLAGS.ner_train_dir)
        # V2 checkpoints are sharded on disk, so test with checkpoint_exists
        # rather than a plain tf.gfile.Exists file check.
        if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
            print("Loading model parameters from %s" %
                  ckpt.model_checkpoint_path)
            m.saver.restore(session, ckpt.model_checkpoint_path)
        else:
            print("Created model with fresh parameters.")
            session.run(tf.global_variables_initializer())

        for i in range(config.max_max_epoch):
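            # Keep the base learning rate for the first max_epoch epochs,
            # then decay it geometrically by lr_decay per epoch after that.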
            lr_decay = config.lr_decay**max(i - config.max_epoch, 0.0)
            m.assign_lr(session, config.learning_rate * lr_decay)

            print("Epoch: %d Learning rate: %.3f" % (i + 1, session.run(m.lr)))
            train_perplexity = run_epoch(session,
                                         m,
                                         train_word,
                                         train_tag,
                                         m.train_op,
                                         verbose=True)
            print("Epoch: %d Train Perplexity: %.3f" %
                  (i + 1, train_perplexity))
            valid_perplexity = run_epoch(session, mvalid, dev_word, dev_tag,
                                         tf.no_op())
            print("Epoch: %d Valid Perplexity: %.3f" %
                  (i + 1, valid_perplexity))

        test_perplexity = run_epoch(session, mtest, test_word, test_tag,
                                    tf.no_op())
        print("Test Perplexity: %.3f" % test_perplexity)
Example #2
def main(_):
    if not FLAGS.ner_data_path:
        raise ValueError("Must set --ner_data_path to the data directory")

    # Load Data
    raw_data = reader.load_data(FLAGS.ner_data_path)
    train_word, train_tag, dev_word, dev_tag, test_word, test_tag, vocabulary = raw_data

    # Load Config
    config_dict = load_config(FLAGS.ner_model_config_path)
    config = get_config(config_dict, FLAGS.ner_lang)
    eval_config = get_config(config_dict, FLAGS.ner_lang)
    eval_config.batch_size = 1
    eval_config.num_steps = 1

    # Load Model Variable Scope
    model_var_scope = get_model_var_scope(FLAGS.ner_scope_name, FLAGS.ner_lang)

    with tf.Graph().as_default(), tf.Session() as session:
        initializer = tf.random_uniform_initializer(-config.init_scale,
                                                    config.init_scale)
        # reuse must be None (not True) here: the training model creates the
        # variables that the eval models below will then reuse.
        with tf.variable_scope(model_var_scope,
                               reuse=None,
                               initializer=initializer):
            m = NERTagger(is_training=True, config=config)
        with tf.variable_scope(model_var_scope,
                               reuse=True,
                               initializer=initializer):
            mvalid = NERTagger(is_training=False, config=config)
            mtest = NERTagger(is_training=False, config=eval_config)

        # CheckPoint State
        ckpt = tf.train.get_checkpoint_state(FLAGS.ner_train_dir)
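        # Restore the newest checkpoint when one exists; otherwise start
        # from freshly initialized variables.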
        if ckpt:
            print("Loading model parameters from %s" %
                  ckpt.model_checkpoint_path)
            m.saver.restore(session,
                            tf.train.latest_checkpoint(FLAGS.ner_train_dir))
        else:
            print("Created model with fresh parameters.")
            session.run(tf.global_variables_initializer())

        # Write the graph out for further use, e.g. loading via the C++ API
        tf.train.write_graph(session.graph_def,
                             './models/',
                             'ner_graph.pbtxt',
                             as_text=True)  # output is text

        for i in range(config.max_max_epoch):
            lr_decay = config.lr_decay**max(i - config.max_epoch, 0.0)
            m.assign_lr(session, config.learning_rate * lr_decay)

            print("Epoch: %d Learning rate: %.3f" % (i + 1, session.run(m.lr)))
            train_perplexity = run_epoch(session,
                                         m,
                                         train_word,
                                         train_tag,
                                         m.train_op,
                                         verbose=True)
            print("Epoch: %d Train Perplexity: %.3f" %
                  (i + 1, train_perplexity))
            valid_perplexity = run_epoch(session, mvalid, dev_word, dev_tag,
                                         tf.no_op())
            print("Epoch: %d Valid Perplexity: %.3f" %
                  (i + 1, valid_perplexity))

        test_perplexity = run_epoch(session, mtest, test_word, test_tag,
                                    tf.no_op())
        print("Test Perplexity: %.3f" % test_perplexity)