Example #1
cnn_setting["num_classes"] = len(properties_list)
cnn_setting["dim_pos"] = int(c_para.get("Global", "pos_dim"))
cnn_setting["num_filters"] = int(c_para.get("CNN", "num_filters"))
policy_setting = {}
policy_setting["state_dim"] = cnn_setting[
    "num_filters"] * 2 + dim_entity * 2 + dim_relation
policy_setting["learning_rate"] = float(c_para.get("RL", "learning_rate"))
assign_rate = float(c_para.get("RL", "assign_rate"))
log_file = c_path.get("RL", "log_file")
log_fo = open(log_file, 'w', encoding='utf-8')
# the raw entity lookup structures are no longer needed
del entity2id_dict, entities_list
best_reward = -100000  # best reward seen so far; initialised to a very low value
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
    # build main / target / best copies of the sentence-encoder CNN
    cnn_main = cnn_class.CNN(cnn_setting, cnn_main_net)
    cnn_target = cnn_class.CNN(cnn_setting, cnn_target_net)
    cnn_best = cnn_class.CNN(cnn_setting, cnn_best_net_name)
    cnn_main.build_model()
    cnn_target.build_model()
    cnn_best.build_model()
    # build matching main / target / best copies of the policy agent
    agent_main = rl_class.Agent(policy_setting, policy_main_net_name)
    agent_target = rl_class.Agent(policy_setting, policy_target_net_name)
    agent_best = rl_class.Agent(policy_setting, policy_best_net_name)
    agent_main.build_model()
    agent_target.build_model()
    agent_best.build_model()
    sess.run(tf.global_variables_initializer())
    # restore pre-trained CNN and policy weights into the main networks
    utils.load_model(sess, pre_cnn_path, cnn_main_net)
    utils.load_model(sess, pre_rl_path, policy_main_net_name)
    sess.run(cnn_main.WV.assign(wv_npy))
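The c_para lookups above follow the standard-library configparser pattern: an INI file with [Global], [CNN] and [RL] sections holding the pos_dim, num_filters, learning_rate and assign_rate keys (the log path in this example is read from a second parser, c_path). A minimal sketch of the loading side, assuming a hypothetical file name rl_cnn.cfg:

import configparser

c_para = configparser.ConfigParser()
c_para.read("rl_cnn.cfg", encoding="utf-8")  # file name is an assumption

# configparser stores everything as strings, so each read is cast explicitly
pos_dim = int(c_para.get("Global", "pos_dim"))
num_filters = int(c_para.get("CNN", "num_filters"))
learning_rate = float(c_para.get("RL", "learning_rate"))
assign_rate = float(c_para.get("RL", "assign_rate"))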
Example #2
cnn_setting["vocab_size"] = vocab_size
policy_setting = {}
policy_setting["state_dim"] = cnn_setting[
    "num_filters"] * 2 + dim_entity * 2 + dim_relation
policy_setting["learning_rate"] = learning_rate

# load the relation label set, position embeddings, entity vectors, and bag-level test data
properties_list = fo.load_relations(relation_data)
positionVec_npy = fo.load_pos_vec(position_vec_path)
entity2id_dict, entity_vec_npy = fo.load_entity2vec(entity_id_path,
                                                    entity_vec_path)
sents_list, en1_position_list, en2_position_list, y_list, Bags = fo.load_agent_test_data(
    test_data, cnn_setting["max_sequence_length"], num_classes, vocab_dict,
    properties_list, entity2id_dict)
selected_sents = []
with tf.Session() as sess:
    cnn = cnn_class.CNN(cnn_setting, "cnn_target")
    agent = rl_class.Agent(policy_setting, "policy_target")
    cnn.build_model()
    agent.build_model()
    sess.run(tf.global_variables_initializer())
    # restore the trained CNN and policy weights, then assign the pre-trained embeddings
    utils.load_model(sess, cnn_model_path, "cnn_target")
    utils.load_model(sess, agent_model_path, "policy_target")
    sess.run(cnn.WV.assign(wv_npy))
    sess.run(cnn.POS.assign(positionVec_npy))
    env = env_class.Environment(sess,
                                cnn,
                                sents_list,
                                y_list,
                                en1_position_list,
                                en2_position_list,
                                entity_vec_npy,
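All three examples restore weights through utils.load_model(sess, path, net_name), a project helper that is not shown here. A hedged sketch of what a scope-restricted restore of that shape usually looks like in TF1; the actual helper may differ:

import tensorflow as tf

def load_model(sess, model_path, scope_name):
    # collect only the variables created under the given variable scope
    var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=scope_name)
    saver = tf.train.Saver(var_list=var_list)
    # model_path is assumed to be a checkpoint prefix; for a directory,
    # resolve it first with tf.train.latest_checkpoint(model_path)
    saver.restore(sess, model_path)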
Example #3
# print(len(vocab_dict.keys()),len(wv_npy))
# vocab_size = len(vocab_dict.keys())
# sents_list,en1_position_list,en2_position_list,lbox_infos,dict_data,e1_list,e2_list = fo.load_extract_data(input_data,cnn_setting["max_sequence_length"],vocab_dict)
vocab_dict, wv_npy, sents_list, en1_position_list, en2_position_list, e1_list, e2_list, lbox_ids, dict_data = fo.load_extract_data(
    input_data, w2v_data, cnn_setting["dim_word"],
    cnn_setting["max_sequence_length"])
vocab_size = len(vocab_dict.keys())
num_classes = len(properties_list)
positionVec_npy = fo.load_pos_vec(position_vec_path)
total_data_size = len(sents_list)
print("[DONE] Loading data")
print("[BUILD] CNN model")
cnn_setting["vocab_size"] = vocab_size

with tf.Session() as sess:
    cnn = cnn_class.CNN(cnn_setting, net_name)
    cnn.build_model()
    sess.run(tf.global_variables_initializer())
    utils.load_model(sess, model_path, net_name)
    sess.run(cnn.WV.assign(wv_npy))
    sess.run(cnn.POS.assign(positionVec_npy))

    # int(total / batch) + 1 yields one extra, empty batch when total_data_size
    # is an exact multiple of batch_size; min() clips the last batch to the data size
    total_batch_size = int(total_data_size / batch_size) + 1
    predicted_y = []
    probs = []
    for batch in tqdm(range(total_batch_size)):
        # slice the current mini-batch
        st = batch * batch_size
        en = min((batch + 1) * batch_size, total_data_size)
        batch_sents = sents_list[st:en]
        batch_pos1 = en1_position_list[st:en]
        batch_pos2 = en2_position_list[st:en]
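Example #3 ends with a standard slice-based batching loop. The same arithmetic written with math.ceil avoids the trailing empty batch noted above; the helper name and the sizes below are illustrative only:

import math

def iter_batches(total_size, batch_size):
    # ceil(total / batch) covers every item without an empty trailing batch
    for b in range(math.ceil(total_size / batch_size)):
        st = b * batch_size
        en = min((b + 1) * batch_size, total_size)
        yield st, en

for st, en in iter_batches(25, 10):
    print(st, en)  # 0 10, then 10 20, then 20 25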