Example #1
def create_config():
    # Build a minimal test configuration: every dimension and vocabulary limit
    # is tiny so the test runs fast.
    c = Config("", "-m", "test")
    c.update({"verbose": 2, "timeout": 1, "embedding_layer_dim": 1, "ner_dim": 1, "action_dim": 1, "lemma_dim": 1,
              "max_words_external": 3, "word_dim_external": 1, "word_dim": 1, "max_words": 3, "max_lemmas": 3,
              "max_tags": 3, "max_pos": 3, "max_deps": 3, "max_edge_labels": 3, "max_puncts": 3, "max_action_types": 3,
              "max_ner_types": 3, "edge_label_dim": 1, "tag_dim": 1, "pos_dim": 1, "dep_dim": 1, "optimizer": "sgd",
              "output_dim": 1, "layer_dim": 2, "layers": 3, "lstm_layer_dim": 2, "lstm_layers": 3,
              "max_action_ratio": 10, "update_word_vectors": False, "copy_shared": None})
    c.update_hyperparams(shared={"lstm_layer_dim": 2, "lstm_layers": 1}, ucca={"word_dim": 2},
                         amr={"max_node_labels": 3, "max_node_categories": 3,
                              "node_label_dim": 1, "node_category_dim": 1})
    return c
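
A minimal usage sketch for the function above; keys passed to Config.update() are read back through the args namespace, mirroring the config.args.* access in Example #2 (the specific assertions are illustrative, not from the original):

# Illustrative only: updated keys surface on config.args.
c = create_config()
assert c.args.optimizer == "sgd"
assert c.args.layers == 3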
Example #2
    def __init__(self):
        # Assumes module-level imports: tensorflow as tf (TF 1.x API), glob.glob,
        # collections.OrderedDict, and Config/Settings/DenseFeatureExtractor from
        # the TUPA parser codebase.
        # Build a UCCA-format parser configuration with the "implicit" setting enabled.
        config = Config()
        setting = Settings("implicit")
        config.update(setting.dict())
        config.set_format("ucca")
        self.feature_extractor = DenseFeatureExtractor(
            OrderedDict(),
            indexed=config.args.classifier != 'mlp',
            hierarchical=False,
            node_dropout=config.args.node_dropout,
            omit_features=config.args.omit_features)

        # Restore a previously trained reward model (TF 1.x): load the graph
        # definition from a matching .meta file, then the latest checkpoint
        # weights from the current directory.
        self.sess = tf.Session()
        saver = tf.train.import_meta_graph(glob('env_r_model-*.meta')[0])
        saver.restore(self.sess, tf.train.latest_checkpoint('./'))
        graph = tf.get_default_graph()
        self.x = graph.get_tensor_by_name("Placeholder:0")      # input placeholder
        self.y = graph.get_tensor_by_name("dense_2/BiasAdd:0")  # output of the last dense layer
        self.length = None
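
A possible companion method for the class above, shown as a sketch: the name predict and the feed layout are assumptions, while self.sess, self.x, and self.y come from the snippet itself.

    def predict(self, features):
        # Sketch only (TF 1.x): feed a batch of feature vectors into the restored
        # input placeholder and fetch the last dense layer's output.
        return self.sess.run(self.y, feed_dict={self.x: features})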
Example #3
def config():
    # Minimal test configuration with node labels disabled and evaluation enabled.
    c = Config("", "-m", "test")
    c.update({"no_node_labels": True, "evaluate": True, "minibatch_size": 50})
    c.update_hyperparams(shared={"layer_dim": 50})
    return c
Example #4
def config():
    c = Config("", "-m", "test")
    c.update({"no_node_labels": True, "evaluate": True, "minibatch_size": 50})
    c.update_hyperparams(shared={"layer_dim": 50})
    return c
def produce_oracle(filename, feature_extractor):
    passage = load_passage(filename)
    sys.stdout.write('.')  # progress indicator, one dot per passage
    sys.stdout.flush()
    # Alternatively, the oracle action sequence could be written to a file:
    # store_sequence_to = "data/oracles/%s/%s.txt" % (cat, basename(filename))
    # with open(store_sequence_to, "w", encoding="utf-8") as f:
    #     for action in gen_actions(passage, feature_extractor):
    #         print(action, file=f)
    # Here the generator is only exhausted for its side effects.
    for _ in gen_actions(passage, feature_extractor):
        pass


if __name__ == "__main__":
    config = Config()
    setting = Settings("implicit")  # enable the "implicit" setting
    config.update(setting.dict())
    config.set_format("ucca")
    feature_extractor = DenseFeatureExtractor(
        OrderedDict(),
        indexed=config.args.classifier != 'mlp',
        hierarchical=False,
        node_dropout=config.args.node_dropout,
        omit_features=config.args.omit_features)

    filenames = passage_files(sys.argv[1])
    for filename in filenames:  # TODO: solve the "KILLED" problem while writing files; use 100 files temporarily until it is solved.
        produce_oracle(filename, feature_extractor)

    # Dump envTrainingData (assumed to be populated as a side effect of
    # gen_actions) to a JSON line for further learning in rewardNN.py.
    json_str = json.dumps(envTrainingData) + "\n"
    json_bytes = json_str.encode('utf-8')
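
The example ends after encoding the JSON line; a minimal continuation sketch, assuming the bytes are appended to a gzip file for rewardNN.py to consume (the filename is hypothetical):

    import gzip
    # Hypothetical continuation: "envTrainingData.json.gz" is an assumed name.
    with gzip.open("envTrainingData.json.gz", "ab") as f:
        f.write(json_bytes)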