def create_maps(sentences):
    """
    Build the character and tag dictionaries.
    """
    
    if config.pre_emb:
        
        """ 首先利用训练集建立字典 """
        dico_chars_train, _, _ = char_mapping(sentences, config.lower)
        
        """ 预训练字向量中的字,如果不在上面的字典中,则加入 """
        dico_chars, char_to_id, id_to_char = augment_with_pretrained(dico_chars_train.copy(),
                                                                     config.emb_file)
        
    else:
        
        """ 只利用训练集建立字典 """
        _, char_to_id, id_to_char = char_mapping(sentences, config.lower)
    
    """ 利用训练集建立标签字典 """
    _, tag_to_id, id_to_tag = tag_mapping(sentences)
                   
    with open(config.map_file, "wb") as f:
        pickle.dump([char_to_id, id_to_char, tag_to_id, id_to_tag], f)
        
    return char_to_id, id_to_char, tag_to_id, id_to_tag
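The helpers that create_maps relies on are defined elsewhere in the project. Below is a minimal sketch of what char_mapping and tag_mapping commonly look like in this family of NER code; the helper names create_dico/create_mapping and the reserved counts for the <PAD>/<UNK> entries are assumptions and may differ from the actual implementation.

def create_dico(item_list):
    # Count how often each item appears across all sentences.
    dico = {}
    for items in item_list:
        for item in items:
            dico[item] = dico.get(item, 0) + 1
    return dico

def create_mapping(dico):
    # Sort by decreasing frequency (ties broken alphabetically) and assign ids.
    sorted_items = sorted(dico.items(), key=lambda x: (-x[1], x[0]))
    id_to_item = {i: v[0] for i, v in enumerate(sorted_items)}
    item_to_id = {v: k for k, v in id_to_item.items()}
    return item_to_id, id_to_item

def char_mapping(sentences, lower):
    # Build a character dictionary from the training sentences.
    chars = [[x[0].lower() if lower else x[0] for x in s] for s in sentences]
    dico = create_dico(chars)
    dico["<PAD>"] = 100000001  # reserve high counts so padding and unknown get fixed ids
    dico["<UNK>"] = 100000000
    char_to_id, id_to_char = create_mapping(dico)
    return dico, char_to_id, id_to_char

def tag_mapping(sentences):
    # Build a tag dictionary from the last column of each token.
    tags = [[x[-1] for x in s] for s in sentences]
    dico = create_dico(tags)
    tag_to_id, id_to_tag = create_mapping(dico)
    return dico, tag_to_id, id_to_tag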
Example #2
def train():
    # load data
    train_sentences = load_sentences(FLAGS.train_file, FLAGS.lower, FLAGS.zeros)
    dev_sentences = load_sentences(FLAGS.dev_file, FLAGS.lower, FLAGS.zeros)
    test_sentences = load_sentences(FLAGS.test_file, FLAGS.lower, FLAGS.zeros)

    if not os.path.isfile(FLAGS.map_file):
        if FLAGS.pre_emb:
            dico_chars_train = char_mapping(train_sentences, FLAGS.lower)[0]
            dico_chars, char_to_id, id_to_char = augment_with_pretrained(
                dico_chars_train.copy(),
                FLAGS.emb_file,
                list(itertools.chain.from_iterable(
                    [[w[0] for w in s] for s in test_sentences])
                )
            )
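As in the other examples, augment_with_pretrained comes from the project's loader module. A hedged sketch of its usual behavior follows, assuming the embedding file stores one entry per line with the character in the first field, and reusing the create_mapping helper sketched earlier; the exact signature is an assumption.

import codecs

def augment_with_pretrained(dictionary, ext_emb_path, chars=None):
    # Collect the vocabulary of the pre-trained embedding file.
    pretrained = set()
    with codecs.open(ext_emb_path, "r", "utf-8") as f:
        for line in f:
            fields = line.rstrip().split()
            if len(fields) > 1:
                pretrained.add(fields[0])

    # Add pre-trained characters to the training dictionary with count 0:
    # every pre-trained character when chars is None, otherwise only those
    # that occur in the given character list (here, the test sentences).
    if chars is None:
        for char in pretrained:
            if char not in dictionary:
                dictionary[char] = 0
    else:
        for char in chars:
            if char in pretrained and char not in dictionary:
                dictionary[char] = 0

    char_to_id, id_to_char = create_mapping(dictionary)
    return dictionary, char_to_id, id_to_char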
Example #3
def train():
    train_sentences = load_sentences(FLAGS.train_file, FLAGS.lower, FLAGS.zeros)
    dev_sentences = load_sentences(FLAGS.dev_file, FLAGS.lower, FLAGS.zeros)
    test_sentences = load_sentences(FLAGS.test_file, FLAGS.lower, FLAGS.zeros)

    if not os.path.isfile(FLAGS.map_file):
        if FLAGS.pre_emb:
            dico_chars_train = char_mapping(train_sentences, FLAGS.lower)[0]
            dico_chars, char_to_id, id_to_char = augment_with_pretrained(
                dico_chars_train.copy(),
                FLAGS.emb_file,
                list(itertools.chain.from_iterable(
                    [[w[0] for w in s] for s in test_sentences])
                )
            )
        else:
            _c, char_to_id, id_to_char = char_mapping(train_sentences, FLAGS.lower)

        # Create a dictionary and a mapping for tags
        _t, tag_to_id, id_to_tag = tag_mapping(train_sentences)
        with open(FLAGS.map_file, "wb") as f:
            pickle.dump([char_to_id, id_to_char, tag_to_id, id_to_tag], f)
    else:
        with open(FLAGS.map_file, "rb") as f:
            char_to_id, id_to_char, tag_to_id, id_to_tag = pickle.load(f)

    train_data = prepare_dataset(
        train_sentences, char_to_id, tag_to_id, FLAGS.lower, FLAGS.self_seg
    )
    dev_data = prepare_dataset(
        dev_sentences, char_to_id, tag_to_id, FLAGS.lower, FLAGS.self_seg
    )
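For context, prepare_dataset turns each tagged sentence into the id lists the model consumes. A minimal sketch is given below, assuming the common [chars, char_ids, seg_ids, tag_ids] layout; the get_seg_features helper, the train flag, and the omission of the self_seg argument used in this example are assumptions.

def prepare_dataset(sentences, char_to_id, tag_to_id, lower=False, train=True):
    # Convert each tagged sentence into four parallel lists:
    # raw characters, character ids, segmentation features, and tag ids.
    def f(x):
        return x.lower() if lower else x

    none_index = tag_to_id["O"]
    data = []
    for s in sentences:
        string = [w[0] for w in s]
        chars = [char_to_id[f(w) if f(w) in char_to_id else "<UNK>"] for w in string]
        segs = get_seg_features("".join(string))  # hypothetical helper for word-boundary features
        if train:
            tags = [tag_to_id[w[-1]] for w in s]
        else:
            tags = [none_index for _ in string]
        data.append([string, chars, segs, tags])
    return data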
Example #4
def train():
    # load data sets
    train_sentences=load_sentences(FLAGS.train_file,FLAGS.zeros)
    dev_sentences=load_sentences(FLAGS.dev_file,FLAGS.zeros)
    test_sentences=load_sentences(FLAGS.test_file,FLAGS.zeros)

    # set the tagging scheme (IOB/IOBES)
    train_sentences=update_tag_scheme(train_sentences,FLAGS.tag_schema)
    dev_sentences=update_tag_scheme(dev_sentences,FLAGS.tag_schema)
    test_sentences=update_tag_scheme(test_sentences,FLAGS.tag_schema)

    # create maps if they do not exist
    if not os.path.exists(FLAGS.map_file):
        if FLAGS.pre_emb:
            char_to_id,_=char_mapping(train_sentences)
            char_to_id,id_to_char=augment_with_pretrained(char_to_id,'wiki_100.utf8')
        else:
            char_to_id, id_to_char=char_mapping(train_sentences)
        tag_to_id, id_to_tag=tag_mapping(train_sentences)
        with open(FLAGS.map_file,'wb') as f:
            cPickle.dump([char_to_id,id_to_char,tag_to_id,id_to_tag],f,cPickle.HIGHEST_PROTOCOL)
    else:
        with open(FLAGS.map_file,'rb') as f:
            char_to_id, id_to_char, tag_to_id, id_to_tag=cPickle.load(f)

    # prepare data, get a collection of list containing index
    train_data=prepare_dataset(train_sentences,char_to_id,tag_to_id,True)
    dev_data=prepare_dataset(dev_sentences,char_to_id,tag_to_id,True)
    test_data=prepare_dataset(test_sentences,char_to_id,tag_to_id,True)
    print "%i %i %i sentences in train / dev / test." % (len(train_data),len(dev_data),len(test_data))

    if not FLAGS.pre_emb:
        pre_emb=None
    else:
        pre_emb=load_word2vec(FLAGS.pre_emb_file,char_to_id,FLAGS.char_dim)
        print "init embedding shape: (%d,%d)" %(pre_emb.shape[0],pre_emb.shape[1])

    train_manager=BatchManager(train_data,FLAGS.batch_size,True)
    dev_manager=BatchManager(dev_data,FLAGS.batch_size,False)
    test_manager=BatchManager(test_data,FLAGS.batch_size,False)

    config=BasicModelConfig(FLAGS,len(char_to_id),len(tag_to_id),4)
    tfConfig = tf.ConfigProto()
    tfConfig.gpu_options.per_process_gpu_memory_fraction = FLAGS.memory_usage
    with tf.Session(config=tfConfig) as sess:
        print "Train started!"
        model=BasicModel(config,pre_emb)
        saver=tf.train.Saver()

        # tensorboard
        if not os.path.exists(FLAGS.summaries_dir):
            os.mkdir(FLAGS.summaries_dir)
        merged=tf.summary.merge_all()
        train_writer=tf.summary.FileWriter(os.path.join(FLAGS.summaries_dir,FLAGS.model_name,"train"),sess.graph)
        test_writer=tf.summary.FileWriter(os.path.join(FLAGS.summaries_dir,FLAGS.model_name,"test"),sess.graph)

        # load previous trained model or create a new model
        if not os.path.exists(FLAGS.checkpoints):
            os.mkdir(FLAGS.checkpoints)
        model_name=os.path.join(FLAGS.checkpoints,FLAGS.model_name)
        ckpt=tf.train.get_checkpoint_state(FLAGS.checkpoints)
        if ckpt and ckpt.model_checkpoint_path:
            print "restore from previous traied model: %s" % FLAGS.model_name
            saver.restore(sess,ckpt.model_checkpoint_path)
        else:
            sess.run(tf.global_variables_initializer())

        def evaluate(sess,model,manager):
            strings=[]
            predicts=[]
            goldens=[]
            bar = ProgressBar(max_value=manager.num_batch)
            for batch in bar(manager.iter_batch()):
                batch_string,batch_predict,batch_golden=model.evaluate_step(sess,batch)
                strings.extend(batch_string)
                predicts.extend(batch_predict)
                goldens.extend(batch_golden)
            return strings,predicts,goldens

        best_eval_f1=0
        noimpro_num=0
        for i in range(FLAGS.max_epoch):
            #train
            train_loss=[]
            bar = ProgressBar(max_value=train_manager.num_batch)
            for step,batch in bar(enumerate(train_manager.iter_batch())):
                batch.append(merged)
                summary,global_step,batch_loss=model.train_step(sess,batch,FLAGS.dropout_keep)
                #add summary to tensorboard
                train_writer.add_summary(summary,global_step)
                train_loss.append(batch_loss)
            print "Epoch %d Train loss is %.4f" % (i+1,np.mean(train_loss))

            #dev
            strings,predicts,goldens=evaluate(sess,model,dev_manager)
            eval_f1=report_results(strings,predicts,goldens,id_to_char,id_to_tag,'outputs/dev')
            if eval_f1>best_eval_f1:
                best_eval_f1=eval_f1
                noimpro_num=0
                saver.save(sess,model_name)
            else:
                noimpro_num+=1
            print "Epoch %d Best eval f1:%.6f" % (i+1,best_eval_f1)

            #test
            strings,predicts,goldens=evaluate(sess,model,test_manager)
            test_f1=report_results(strings,predicts,goldens,id_to_char,id_to_tag,'outputs/test',True)
            #early_stop
            if noimpro_num>=3:
                print "Early stop! Final F1 scores on test data is :%.6f" % test_f1
                break
            print
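Both this example and the next rely on a BatchManager (its num_batch/len_data attributes and iter_batch method are used above) to batch and zero-pad the prepared data. A minimal sketch follows; the constructor signature differs between the two examples, so the shuffle argument and the exact padding layout here are assumptions.

import math
import random

class BatchManager(object):
    # Groups prepared data into batches and zero-pads every sequence in a batch
    # to the length of the longest sentence in that batch.
    def __init__(self, data, batch_size, shuffle=True):
        self.batch_data = self.sort_and_pad(data, batch_size)
        self.len_data = len(self.batch_data)
        self.num_batch = self.len_data
        self.shuffle = shuffle

    def sort_and_pad(self, data, batch_size):
        # Sorting by sentence length keeps the padding inside each batch small.
        num_batch = int(math.ceil(len(data) / float(batch_size)))
        sorted_data = sorted(data, key=lambda x: len(x[0]))
        return [self.pad_data(sorted_data[i * batch_size:(i + 1) * batch_size])
                for i in range(num_batch)]

    @staticmethod
    def pad_data(data):
        strings, chars, segs, targets = [], [], [], []
        max_length = max(len(sentence[0]) for sentence in data)
        for string, char, seg, target in data:
            padding = [0] * (max_length - len(string))
            strings.append(string + padding)
            chars.append(char + padding)
            segs.append(seg + padding)
            targets.append(target + padding)
        return [strings, chars, segs, targets]

    def iter_batch(self, shuffle=False):
        if shuffle or self.shuffle:
            random.shuffle(self.batch_data)
        for batch in self.batch_data:
            yield batch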
Example #5
def train():
    # load data sets
    train_sentences = load_sentences(FLAGS.train_file, FLAGS.lower,
                                     FLAGS.zeros)
    dev_sentences = load_sentences(FLAGS.dev_file, FLAGS.lower, FLAGS.zeros)
    test_sentences = load_sentences(FLAGS.test_file, FLAGS.lower, FLAGS.zeros)

    # choose the tag scheme (IOB / IOBES); IOBES is used by default
    update_tag_scheme(train_sentences, FLAGS.tag_schema)
    update_tag_scheme(test_sentences, FLAGS.tag_schema)

    if not os.path.isfile(FLAGS.map_file):
        if FLAGS.pre_emb:
            dico_chars_train = char_mapping(train_sentences, FLAGS.lower)[0]
            dico_chars, char_to_id, id_to_char = augment_with_pretrained(
                dico_chars_train.copy(), FLAGS.emb_file,
                list(
                    itertools.chain.from_iterable([[w[0] for w in s]
                                                   for s in test_sentences])))
        else:
            _c, char_to_id, id_to_char = char_mapping(train_sentences,
                                                      FLAGS.lower)

        # Create a dictionary and a mapping for tags
        _t, tag_to_id, id_to_tag = tag_mapping(train_sentences)
        with open(FLAGS.map_file, "wb") as f:
            pickle.dump([char_to_id, id_to_char, tag_to_id, id_to_tag], f)
    else:
        with open(FLAGS.map_file, "rb") as f:
            # {'S-LOC': 10, 'E-LOC': 3, 'B-ORG': 4, 'S-PER': 11, 'S-ORG': 12, 'O': 0,
            # 'E-ORG': 5, 'I-LOC': 6, 'I-PER': 7, 'I-ORG': 1, 'B-PER': 8, 'B-LOC': 2, 'E-PER': 9}
            char_to_id, id_to_char, tag_to_id, id_to_tag = pickle.load(f)

    # convert the data into indexed (numeric) form
    train_data = prepare_dataset(train_sentences, char_to_id, tag_to_id,
                                 FLAGS.lower)
    dev_data = prepare_dataset(dev_sentences, char_to_id, tag_to_id,
                               FLAGS.lower)
    test_data = prepare_dataset(test_sentences, char_to_id, tag_to_id,
                                FLAGS.lower)
    print("%i / %i / %i sentences in train / dev / test." %
          (len(train_data), len(dev_data), len(test_data)))

    # pad sequences that are too short with zeros
    train_manager = BatchManager(train_data, FLAGS.batch_size)
    dev_manager = BatchManager(dev_data, 100)
    test_manager = BatchManager(test_data, 100)

    make_path(FLAGS)
    if os.path.isfile(FLAGS.config_file):
        config = load_config(FLAGS.config_file)
    else:
        config = config_model(char_to_id, tag_to_id)
        save_config(config, FLAGS.config_file)
    make_path(FLAGS)

    log_path = os.path.join("log", FLAGS.log_file)
    logger = get_logger(log_path)
    print_config(config, logger)

    # GPU settings
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    steps_per_epoch = train_manager.len_data

    with tf.Session(config=tf_config) as sess:
        model = create_model(sess, Model, FLAGS.ckpt_path, load_word2vec,
                             config, id_to_char, logger)
        logger.info("start training")
        loss = []
        for i in range(100):
            for batch in train_manager.iter_batch(shuffle=True):
                step, batch_loss = model.run_step(sess, True, batch)
                loss.append(batch_loss)
                if step % FLAGS.steps_check == 0:
                    iteration = step // steps_per_epoch + 1
                    logger.info("iteration:{} step:{}/{}, "
                                "NER loss:{:>9.6f}".format(
                                    iteration, step % steps_per_epoch,
                                    steps_per_epoch, np.mean(loss)))
                    # compute the average loss once every 100 steps
                    loss = []

            best = evaluate(sess, model, "dev", dev_manager, id_to_tag, logger)
            if best:
                save_model(sess, model, FLAGS.ckpt_path, logger)
            evaluate(sess, model, "test", test_manager, id_to_tag, logger)