Code Example #1
def main(_):
    if FLAGS.train:
        if FLAGS.clean:
            clean(FLAGS)  # remove artifacts of the previous run before retraining
        main_train()
    else:
        evaluate_one()
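These examples assume a module-level FLAGS object. A minimal sketch of how such flags are typically declared in TensorFlow 1.x (the flag names mirror the examples above; the defaults and help strings are assumptions):

import tensorflow as tf

flags = tf.app.flags
flags.DEFINE_boolean("train", False, "Run training (False = run evaluation)")
flags.DEFINE_boolean("clean", True, "Remove old maps/checkpoints before training")
FLAGS = flags.FLAGS

if __name__ == "__main__":
    tf.app.run(main)  # parses the flags, then calls main(_)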
Code Example #2
File: test_BiLSTM_CRF.py  Project: kepu-cctv/DeepNER
def main(_):
    FLAGS.train = True  # training mode is forced on here
    if FLAGS.train:
        # if FLAGS.clean:  (the check is commented out, so clean always runs)
        clean(FLAGS)
        train()
    else:
        evaluate_line()
Code Example #3
def main(args):
    if args.train:
        if args.clean:
            clean(args)
        print("开始训练模型!!!")
        train()
    else:
        evaluate_line()
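Code Example #3 takes an explicit args object instead of global FLAGS. A hypothetical argparse setup that would produce it (only the flag names come from the example; everything else is an assumption):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--train", action="store_true",
                    help="run training instead of interactive evaluation")
parser.add_argument("--clean", action="store_true",
                    help="remove previous artifacts before training")
main(parser.parse_args())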
Code Example #4
def load_json(p, lower):
    """Split CoreNLP-tokenized sentences into source sentences and
    '@highlight' target (summary) sentences."""
    source = []
    tgt = []
    flag = False  # True right after an '@highlight' marker sentence
    for sent in json.load(open(p, 'r', encoding='UTF-8'))['sentences']:
        tokens = [t['word'] for t in sent['tokens']]
        if lower:
            tokens = [t.lower() for t in tokens]
        if tokens[0] == '@highlight':
            flag = True
            continue
        if flag:
            tgt.append(tokens)
            flag = False
        else:
            source.append(tokens)

    source = [clean(' '.join(sent)).split() for sent in source]
    tgt = [clean(' '.join(sent)).split() for sent in tgt]
    return source, tgt
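load_json expects Stanford CoreNLP JSON output in which each summary sentence is preceded by an '@highlight' marker sentence (the CNN/DailyMail convention). A hypothetical call (the file path is an assumption):

source, tgt = load_json("stories_tokenized/0001.story.json", lower=True)
print("%d source / %d highlight sentences" % (len(source), len(tgt)))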
Code Example #5
File: main_cnn.py  Project: shxliang/text-cnn-rnn
def save_cnn_for_java():
    """
    保存模型用于Java加载调用
    :return:
    """
    config = load_config(FLAGS.config_file)
    model = TextCNN(config)

    with tf.Session() as session:
        session.run(tf.global_variables_initializer())
        saver = tf.train.Saver()
        saver.restore(sess=session, save_path=FLAGS.save_dir)  # restore the saved model

        builder = tf.saved_model.builder.SavedModelBuilder("tmp/cnn_model")
        builder.add_meta_graph_and_variables(
            session, [tf.saved_model.tag_constants.SERVING])
        builder.save()


if __name__ == "__main__":
    if FLAGS.train:
        if FLAGS.clean:
            clean(FLAGS)
            mkdir(FLAGS)
        main_train()
    else:
        main_test()
        # evaluate_one(TextCNN, FLAGS)

    # save_cnn_for_java()
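The SavedModel exported by save_cnn_for_java is typically loaded from Java via SavedModelBundle.load("tmp/cnn_model", "serve"); the same export can be sanity-checked from Python. A minimal sketch (the export path matches the example; this check is not part of the original file):

import tensorflow as tf

with tf.Session(graph=tf.Graph()) as session:
    # load the graph and variables exported under the SERVING tag
    tf.saved_model.loader.load(
        session, [tf.saved_model.tag_constants.SERVING], "tmp/cnn_model")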
Code Example #6
File: main.py  Project: memoiry/NLU
def train_ner():
    clean(FLAGS)
    # load data sets
    train_sentences = load_sentences(FLAGS.train_file, FLAGS.lower,
                                     FLAGS.zeros)
    dev_sentences = load_sentences(FLAGS.dev_file, FLAGS.lower, FLAGS.zeros)
    test_sentences = load_sentences(FLAGS.test_file, FLAGS.lower, FLAGS.zeros)

    # Use selected tagging scheme (IOB / IOBES)
    update_tag_scheme(train_sentences, FLAGS.tag_schema)
    update_tag_scheme(test_sentences, FLAGS.tag_schema)
    update_tag_scheme(dev_sentences, FLAGS.tag_schema)

    # create maps if not exist
    if not os.path.isfile(FLAGS.map_file):
        # create dictionary for word
        if FLAGS.pre_emb:
            dico_chars_train = char_mapping(train_sentences, FLAGS.lower)[0]
            dico_chars, char_to_id, id_to_char = augment_with_pretrained(
                dico_chars_train.copy(), FLAGS.emb_file,
                list(
                    itertools.chain.from_iterable([[w[0] for w in s]
                                                   for s in test_sentences])))
        else:
            _c, char_to_id, id_to_char = char_mapping(train_sentences,
                                                      FLAGS.lower)

        # Create a dictionary and a mapping for tags
        _t, tag_to_id, id_to_tag = tag_mapping(train_sentences)
        with open(FLAGS.map_file, "wb") as f:
            pickle.dump([char_to_id, id_to_char, tag_to_id, id_to_tag], f)
    else:
        with open(FLAGS.map_file, "rb") as f:
            char_to_id, id_to_char, tag_to_id, id_to_tag = pickle.load(f)

    # prepare data, get a collection of list containing index
    train_data = prepare_dataset(train_sentences, char_to_id, tag_to_id,
                                 FLAGS.lower)
    dev_data = prepare_dataset(dev_sentences, char_to_id, tag_to_id,
                               FLAGS.lower)
    test_data = prepare_dataset(test_sentences, char_to_id, tag_to_id,
                                FLAGS.lower)
    print("%i / %i / %i sentences in train / dev / test." %
          (len(train_data), 0, len(test_data)))

    train_manager = BatchManager(train_data, FLAGS.batch_size)
    dev_manager = BatchManager(dev_data, 100)
    test_manager = BatchManager(test_data, 100)
    # make path for store log and model if not exist
    make_path(FLAGS)
    if os.path.isfile(FLAGS.config_file):
        config = load_config(FLAGS.config_file)
    else:
        config = config_model(char_to_id, tag_to_id)
        save_config(config, FLAGS.config_file)

    log_path = os.path.join("log", FLAGS.log_file)
    logger = get_logger(log_path)
    print_config(config, logger)

    # limit GPU memory
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    steps_per_epoch = train_manager.len_data
    with tf.Session(config=tf_config) as sess:
        model = create_model(sess, Model, FLAGS.ckpt_path, load_word2vec,
                             config, id_to_char, logger)
        logger.info("start training")
        loss = []
        for i in range(25):  # fixed number of training epochs
            for batch in train_manager.iter_batch(shuffle=True):
                step, batch_loss = model.run_step(sess, True, batch)
                loss.append(batch_loss)
                if step % FLAGS.steps_check == 0:
                    iteration = step // steps_per_epoch + 1
                    logger.info("iteration:{} step:{}/{}, "
                                "NER loss:{:>9.6f}".format(
                                    iteration, step % steps_per_epoch,
                                    steps_per_epoch, np.mean(loss)))
                    loss = []

            best = evaluate(sess, model, "dev", dev_manager, id_to_tag, logger)
            if best:
                save_model(sess, model, FLAGS.ckpt_path, logger)
            evaluate(sess, model, "test", test_manager, id_to_tag, logger)
Code Example #7
def main(_):
    FLAGS.train = True  # force training mode
    FLAGS.clean = True  # force a clean run
    clean(FLAGS)

    train()
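Every example above delegates to a clean(FLAGS) helper whose body is not shown. In this family of NER projects it typically deletes the artifacts of a previous run; a minimal sketch under that assumption (the attribute names mirror the flags used in Code Example #6):

import os
import shutil

def clean(params):
    """Remove the map/config files and checkpoint/log directories of a previous run."""
    for f in (params.map_file, params.config_file):
        if os.path.isfile(f):
            os.remove(f)
    for d in (params.ckpt_path, "log"):
        if os.path.isdir(d):
            shutil.rmtree(d)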