Example #1
    def __init__(self,
                 input_dimension,
                 variables,
                 attention_heads=1,
                 variable_prefix="",
                 strategy=None,
                 attention_dropout=0.0):
        self.strategy = strategy
        self.input_dimension = input_dimension
        self.heads = attention_heads

        self.variable_prefix = variable_prefix
        if self.variable_prefix != "":
            self.variable_prefix += "_"

        self.variables = variables

        # variable_prefix already carries a trailing "_" when non-empty, so the
        # transform names are composed without a second separator.
        self.linear_key = MultilayerPerceptron(
            [int(0.5 * self.input_dimension),
             int(0.5 * self.input_dimension)], self.variables,
            self.variable_prefix + "key_transform")
        self.linear_value = MultilayerPerceptron(
            [int(0.5 * self.input_dimension),
             int(0.5 * self.input_dimension)], self.variables,
            self.variable_prefix + "value_transform")
        self.attention_dropout = attention_dropout

    def initialize_graph(self):
        if not self.model_settings["static_entity_embeddings"]:
            self.entity_embedding = VectorEmbedding(self.entity_indexer,
                                                    self.variables,
                                                    variable_prefix="entity")
            self.add_component(self.entity_embedding)
        else:
            self.entity_embedding = StaticVectorEmbedding(
                self.entity_indexer, self.variables, variable_prefix="entity")
            self.add_component(self.entity_embedding)

        self.word_embedding = SequenceEmbedding(self.word_indexer,
                                                self.variables,
                                                variable_prefix="word")
        self.add_component(self.word_embedding)

        self.lstms = [
            BiLstm(self.variables,
                   self.model_settings["word_embedding_dimension"],
                   variable_prefix="lstm_" + str(i))
            for i in range(self.model_settings["n_lstms"])
        ]
        for lstm in self.lstms:
            self.add_component(lstm)

        self.target_comparator = TargetComparator(
            self.variables, variable_prefix="comparison_to_sentence")
        self.add_component(self.target_comparator)

        self.decoder = SoftmaxDecoder(self.variables)
        self.add_component(self.decoder)

        self.sentence_to_graph_mapper = EmbeddingRetriever(
            self.variables, duplicate_policy="sum", variable_prefix="mapper")
        self.add_component(self.sentence_to_graph_mapper)

        if self.model_settings["use_transformation"]:
            self.transformation = MultilayerPerceptron(
                [
                    self.model_settings["word_embedding_dimension"],
                    self.model_settings["entity_embedding_dimension"]
                ],
                self.variables,
                variable_prefix="transformation",
                l2_scale=self.model_settings["regularization_scale"])

            self.centroid_transformation = MultilayerPerceptron(
                [
                    self.model_settings["entity_embedding_dimension"],
                    self.model_settings["word_embedding_dimension"]
                ],
                self.variables,
                variable_prefix="centroid_transformation",
                l2_scale=self.model_settings["regularization_scale"])
            self.add_component(self.centroid_transformation)
            self.add_component(self.transformation)
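
The constructor at the top of this example is the same MultiheadAttention constructor shown in full in Example #15: the input is split into a key half and a value half, and each half is later divided across the heads. That only works when input_dimension is even and half of it divides evenly by attention_heads. A minimal standalone check of that bookkeeping (plain Python, independent of the classes above):

def check_attention_dims(input_dimension, attention_heads):
    # Mirrors the int(0.5 * input_dimension) splits in the constructor above.
    half = int(0.5 * input_dimension)
    assert 2 * half == input_dimension, "input_dimension must be even"
    per_head, remainder = divmod(half, attention_heads)
    assert remainder == 0, "half the input must divide evenly across heads"
    return half, per_head

print(check_attention_dims(128, 4))  # (64, 16)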
Example #3
    def initialize_graph(self):
        if not self.model_settings["static_entity_embeddings"]:
            self.entity_embedding = VectorEmbedding(self.entity_indexer,
                                                    self.variables,
                                                    variable_prefix="entity")
            self.add_component(self.entity_embedding)
        else:
            self.entity_embedding = StaticVectorEmbedding(
                self.entity_indexer, self.variables, variable_prefix="entity")
            self.add_component(self.entity_embedding)

        self.hypergraph = TensorflowHypergraphRepresentation(self.variables)
        self.add_component(self.hypergraph)

        #self.question_sentence = TensorflowSentenceRepresentation(self.variables)
        #self.add_component(self.question_sentence)

        self.word_embedding = SequenceEmbedding(self.word_indexer,
                                                self.variables,
                                                variable_prefix="word")
        self.add_component(self.word_embedding)

        self.target_comparator = TargetComparator(
            self.variables, variable_prefix="comparison_to_sentence")
        self.add_component(self.target_comparator)

        self.decoder = SoftmaxDecoder(self.variables)
        self.add_component(self.decoder)

        self.sentence_to_graph_mapper = EmbeddingRetriever(
            self.variables, duplicate_policy="sum", variable_prefix="mapper")
        self.add_component(self.sentence_to_graph_mapper)

        if self.model_settings["use_transformation"]:
            self.transformation = MultilayerPerceptron(
                [
                    self.model_settings["word_embedding_dimension"],
                    self.model_settings["entity_embedding_dimension"]
                ],
                self.variables,
                variable_prefix="transformation",
                l2_scale=self.model_settings["regularization_scale"])

            self.centroid_transformation = MultilayerPerceptron(
                [
                    self.model_settings["entity_embedding_dimension"],
                    self.model_settings["word_embedding_dimension"]
                ],
                self.variables,
                variable_prefix="centroid_transformation",
                l2_scale=self.model_settings["regularization_scale"])
            self.add_component(self.centroid_transformation)
            self.add_component(self.transformation)
Example #4
    def initialize_as_nn(self):
        self.scoring_function_type = "neural_network"
        self.final_transformation = MultilayerPerceptron(
            [
                int(self.model_settings["lstm_hidden_state_dimension"] / 2) +
                self.model_settings["entity_embedding_dimension"] +
                self.model_settings["concatenate_scores"],
                self.model_settings["nn_hidden_state_dimension"], 1
            ],
            self.variables,
            variable_prefix=self.variable_prefix + "transformation",
            l2_scale=self.model_settings["regularization_scale"],
            dropout_rate=self.model_settings["transform_dropout"])
        self.target_comparator = TargetComparator(
            self.variables,
            variable_prefix=self.variable_prefix + "comparison_to_sentence",
            comparison="concat")
        self.sub_components = [
            self.target_comparator, self.final_transformation
        ]
Example #5
    def initialize_as_factorization(self):
        self.scoring_function_type = "factorization"
        self.use_transformation = self.model_settings["use_transformation"]
        self.target_comparator = TargetComparator(
            self.variables,
            variable_prefix=self.variable_prefix + "comparison_to_sentence",
            comparison="dot_product")
        self.sub_components = [self.target_comparator]

        if self.use_transformation:
            self.transformation = MultilayerPerceptron(
                [
                    int(self.model_settings["lstm_hidden_state_dimension"] /
                        2), self.model_settings["entity_embedding_dimension"]
                ],
                self.variables,
                variable_prefix=self.variable_prefix + "transformation",
                l2_scale=self.model_settings["regularization_scale"],
                dropout_rate=self.model_settings["transform_dropout"])

            self.sub_components += [self.transformation]

    def initialize_graph(self):
        self.hypergraph = TensorflowHypergraphRepresentation(self.variables, edge_dropout_rate=self.model_settings["edge_dropout"])
        self.add_component(self.hypergraph)

        self.lstms = [BiLstm(self.variables, self.model_settings["word_embedding_dimension"]+1, self.model_settings["lstm_hidden_state_dimension"], variable_prefix="lstm_" + str(i)) for i in
                      range(self.model_settings["n_lstms"])]
        for lstm in self.lstms:
            self.add_component(lstm)

        self.word_embedding = SequenceEmbedding(self.word_indexer, self.variables, variable_prefix="word", word_dropout_rate=self.model_settings["word_dropout"], is_static=self.model_settings["static_word_embeddings"])
        self.add_component(self.word_embedding)

        #self.attention = Attention(self.model_settings["word_embedding_dimension"], self.variables, variable_prefix="attention", strategy="constant_query")
        self.attention = MultiheadAttention(self.model_settings["lstm_hidden_state_dimension"], self.variables, attention_heads=4,
                                   variable_prefix="attention", strategy="constant_query", attention_dropout=self.model_settings["attention_dropout"])

        self.add_component(self.attention)

        self.target_comparator = TargetComparator(self.variables, variable_prefix="comparison_to_sentence", comparison="concat")
        self.add_component(self.target_comparator)

        self.decoder = SoftmaxDecoder(self.variables, self.model_settings["loss"])
        self.add_component(self.decoder)

        self.candidate_scorer = NeuralNetworkOrFactorizationScorer(self.model_settings, self.variables, variable_prefix="scorer")
        self.add_component(self.candidate_scorer)

        self.hypergraph_gcn_propagation_units = [None] * self.model_settings["n_layers"]
        for layer in range(self.model_settings["n_layers"]):
            self.hypergraph_gcn_propagation_units[layer] = HypergraphGcnPropagationUnit("layer_" + str(layer),
                                                                                        self.facts,
                                                                                        self.variables,
                                                                                        self.model_settings["entity_embedding_dimension"],
                                                                                        self.hypergraph,
                                                                                        weights="identity",
                                                                                        biases="relation_specific",
                                                                                        self_weight="identity",
                                                                                        self_bias="zero",
                                                                                        add_inverse_relations=True)
            self.add_component(self.hypergraph_gcn_propagation_units[layer])

        self.sentence_to_graph_mapper = EmbeddingRetriever(self.variables, duplicate_policy="sum", variable_prefix="mapper")
        self.add_component(self.sentence_to_graph_mapper)

        self.final_transformation = MultilayerPerceptron(
            [int(self.model_settings["lstm_hidden_state_dimension"] / 2) +
             self.model_settings["entity_embedding_dimension"],
             self.model_settings["nn_hidden_state_dimension"], 1],
            self.variables,
            variable_prefix="transformation",
            l2_scale=self.model_settings["regularization_scale"])
        self.add_component(self.final_transformation)
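
For orientation, the graph built above feeds word embeddings (widened by one centroid-flag column, hence the `+1` in the BiLstm input size) through the LSTM stack, pools them with multi-head attention, and hands the result to the candidate scorer. A rough NumPy sketch of the shape flow, with invented dimensions standing in for the model_settings values:

import numpy as np

# Hypothetical dimensions standing in for model_settings.
batch, seq_len = 2, 7
word_dim, lstm_dim, entity_dim = 40, 64, 30

# SequenceEmbedding output plus the centroid-flag column.
word_embeddings = np.zeros((batch, seq_len, word_dim + 1))

# Each BiLstm maps the sequence to lstm_hidden_state_dimension features.
lstm_output = np.zeros((batch, seq_len, lstm_dim))

# attend() (see Example #15) keeps only the value half, concatenated over heads.
sentence_vector = np.zeros((batch, lstm_dim // 2))

# The NN scorer then compares this against entity features, optionally widened
# by the vertex score (the concatenate_scores setting).
scorer_input_dim = lstm_dim // 2 + entity_dim + 1
print(word_embeddings.shape, lstm_output.shape, sentence_vector.shape, scorer_input_dim)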
Example #7
class EntityEmbeddingVsBagOfWords(AbstractTensorflowModel):
    def get_preprocessor_stack_types(self):
        preprocessor_stack_types = ["hypergraph", "gold", "sentence"]
        if self.model_settings["static_entity_embeddings"]:
            preprocessor_stack_types += ["static_entity_embeddings"]
        return preprocessor_stack_types

    def initialize_graph(self):
        if not self.model_settings["static_entity_embeddings"]:
            self.entity_embedding = VectorEmbedding(self.entity_indexer,
                                                    self.variables,
                                                    variable_prefix="entity")
            self.add_component(self.entity_embedding)
        else:
            self.entity_embedding = StaticVectorEmbedding(
                self.entity_indexer, self.variables, variable_prefix="entity")
            self.add_component(self.entity_embedding)

        self.hypergraph = TensorflowHypergraphRepresentation(self.variables)
        self.add_component(self.hypergraph)

        #self.question_sentence = TensorflowSentenceRepresentation(self.variables)
        #self.add_component(self.question_sentence)

        self.word_embedding = SequenceEmbedding(self.word_indexer,
                                                self.variables,
                                                variable_prefix="word")
        self.add_component(self.word_embedding)

        self.target_comparator = TargetComparator(
            self.variables, variable_prefix="comparison_to_sentence")
        self.add_component(self.target_comparator)

        self.decoder = SoftmaxDecoder(self.variables)
        self.add_component(self.decoder)

        self.sentence_to_graph_mapper = EmbeddingRetriever(
            self.variables, duplicate_policy="sum", variable_prefix="mapper")
        self.add_component(self.sentence_to_graph_mapper)

        if self.model_settings["use_transformation"]:
            self.transformation = MultilayerPerceptron(
                [
                    self.model_settings["word_embedding_dimension"],
                    self.model_settings["entity_embedding_dimension"]
                ],
                self.variables,
                variable_prefix="transformation",
                l2_scale=self.model_settings["regularization_scale"])

            self.centroid_transformation = MultilayerPerceptron(
                [
                    self.model_settings["entity_embedding_dimension"],
                    self.model_settings["word_embedding_dimension"]
                ],
                self.variables,
                variable_prefix="centroid_transformation",
                l2_scale=self.model_settings["regularization_scale"])
            self.add_component(self.centroid_transformation)
            self.add_component(self.transformation)

    def set_indexers(self, indexers):
        self.word_indexer = indexers.word_indexer
        self.entity_indexer = indexers.entity_indexer

    def compute_entity_scores(self):
        self.hypergraph.entity_vertex_embeddings = \
            self.entity_embedding.get_representations()
        word_embeddings = self.word_embedding.get_representations()
        word_embedding_shape = tf.shape(word_embeddings)
        word_embeddings = tf.reshape(
            word_embeddings,
            [-1, self.model_settings["word_embedding_dimension"]])

        centroid_embeddings = self.sentence_to_graph_mapper.get_forward_embeddings(
            self.hypergraph.entity_vertex_embeddings)
        centroid_embeddings = self.centroid_transformation.transform(
            centroid_embeddings)
        word_embeddings += self.sentence_to_graph_mapper.map_backwards(
            centroid_embeddings)
        word_embeddings = tf.reshape(word_embeddings, word_embedding_shape)

        bag_of_words = tf.reduce_sum(word_embeddings, 1)

        if self.model_settings["use_transformation"]:
            bag_of_words = self.transformation.transform(bag_of_words)

        entity_scores = self.target_comparator.get_comparison_scores(
            bag_of_words, self.hypergraph.entity_vertex_embeddings)

        return entity_scores
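
Stripped of the centroid plumbing, the score computed above is a bag-of-words comparison: sum the word vectors, optionally project the sum into entity space, and compare it against every candidate embedding. A minimal NumPy sketch, assuming a dot-product comparison and a single linear layer standing in for the transformation MLP:

import numpy as np

rng = np.random.default_rng(0)
word_dim, entity_dim = 40, 30
word_embeddings = rng.normal(size=(7, word_dim))       # one 7-word sentence
entity_embeddings = rng.normal(size=(12, entity_dim))  # 12 candidate vertices

bag_of_words = word_embeddings.sum(axis=0)             # tf.reduce_sum(word_embeddings, 1)
W = rng.normal(size=(word_dim, entity_dim))            # stand-in for the transformation MLP
projected = bag_of_words @ W                           # the use_transformation branch

entity_scores = entity_embeddings @ projected          # assumed dot-product comparison
print(entity_scores.shape)                             # (12,)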
class PathBagWithGatesVsLstm(AbstractTensorflowModel):

    def get_preprocessor_stack_types(self):
        preprocessor_stack_types = ["hypergraph", "gold", "sentence"]
        if self.model_settings["static_entity_embeddings"]:
            preprocessor_stack_types += ["static_entity_embeddings"]
        return preprocessor_stack_types

    def initialize_graph(self):
        self.hypergraph = TensorflowHypergraphRepresentation(self.variables, edge_dropout_rate=self.model_settings["edge_dropout"])
        self.add_component(self.hypergraph)

        self.lstms = [BiLstm(self.variables, self.model_settings["word_embedding_dimension"]+1, self.model_settings["lstm_hidden_state_dimension"], variable_prefix="lstm_" + str(i)) for i in
                      range(self.model_settings["n_lstms"])]
        for lstm in self.lstms:
            self.add_component(lstm)

        self.word_embedding = SequenceEmbedding(self.word_indexer, self.variables, variable_prefix="word", word_dropout_rate=self.model_settings["word_dropout"], is_static=self.model_settings["static_word_embeddings"])
        self.add_component(self.word_embedding)

        #self.attention = Attention(self.model_settings["word_embedding_dimension"], self.variables, variable_prefix="attention", strategy="constant_query")
        self.attention = MultiheadAttention(self.model_settings["lstm_hidden_state_dimension"], self.variables, attention_heads=self.model_settings["n_attention_heads"],
                                   variable_prefix="attention", strategy="constant_query", attention_dropout=self.model_settings["attention_dropout"])

        self.add_component(self.attention)

        self.target_comparator = TargetComparator(self.variables, variable_prefix="comparison_to_sentence", comparison="concat")
        self.add_component(self.target_comparator)

        self.decoder = SoftmaxDecoder(self.variables)
        self.add_component(self.decoder)

        self.candidate_scorer = NeuralNetworkOrFactorizationScorer(self.model_settings, self.variables, variable_prefix="scorer")
        self.add_component(self.candidate_scorer)

        if self.model_settings["gate_input"] == "GCN":
            gate_input_dim = self.model_settings["gate_input_dim"]

            self.gate_transform = MultilayerPerceptron([1, gate_input_dim],
                                                         self.variables,
                                                         variable_prefix="gate_transformation",
                                                         l2_scale=self.model_settings["regularization_scale"],
                                                         dropout_rate=self.model_settings["transform_dropout"])
            self.add_component(self.gate_transform)

            self.gate_gcns = [None] * self.model_settings["gate_input_layers"]
            for layer in range(self.model_settings["gate_input_layers"]):
                self.gate_gcns[layer] = HypergraphGcnPropagationUnit("gate_layer_" + str(layer),
                                                                     self.facts,
                                                                     self.variables,
                                                                     gate_input_dim,
                                                                     self.hypergraph,
                                                                     weights="single",
                                                                     biases="constant",
                                                                     self_weight="full",
                                                                     self_bias="constant",
                                                                     add_inverse_relations=True)
                self.add_component(self.gate_gcns[layer])
        else:
            gate_input_dim = 1

        self.hypergraph_gcn_propagation_units = [None] * self.model_settings["n_layers"]
        for layer in range(self.model_settings["n_layers"]):
            self.hypergraph_gcn_propagation_units[layer] = HypergraphGcnPropagationUnit("layer_" + str(layer),
                                                                                        self.facts,
                                                                                        self.variables,
                                                                                        self.model_settings["entity_embedding_dimension"],
                                                                                        self.hypergraph,
                                                                                        weights="identity",
                                                                                        biases="relation_specific",
                                                                                        self_weight="identity",
                                                                                        self_bias="zero",
                                                                                        add_inverse_relations=True,
                                                                                        gate_mode="features_given",
                                                                                        gate_input_dim=gate_input_dim)
            self.add_component(self.hypergraph_gcn_propagation_units[layer])

        self.sentence_to_graph_mapper = EmbeddingRetriever(self.variables, duplicate_policy="sum", variable_prefix="mapper")
        self.add_component(self.sentence_to_graph_mapper)

        if False: #self.model_settings["use_transformation"]:
            self.transformation = MultilayerPerceptron([self.model_settings["word_embedding_dimension"],
                                                        self.model_settings["entity_embedding_dimension"]],
                                                       self.variables,
                                                       variable_prefix="transformation",
                                                       l2_scale=self.model_settings["regularization_scale"])


            self.centroid_transformation = MultilayerPerceptron([self.model_settings["entity_embedding_dimension"],
                                                                 self.model_settings["word_embedding_dimension"]],
                                                                self.variables,
                                                                variable_prefix="centroid_transformation",
                                                                l2_scale=self.model_settings["regularization_scale"])
            self.add_component(self.centroid_transformation)
            self.add_component(self.transformation)

        self.final_transformation = MultilayerPerceptron(
            [int(self.model_settings["lstm_hidden_state_dimension"] / 2) +
             self.model_settings["entity_embedding_dimension"],
             self.model_settings["nn_hidden_state_dimension"], 1],
            self.variables,
            variable_prefix="transformation",
            l2_scale=self.model_settings["regularization_scale"])
        self.add_component(self.final_transformation)

    def set_indexers(self, indexers):
        self.word_indexer = indexers.word_indexer
        self.relation_indexer = indexers.relation_indexer
        self.entity_indexer = indexers.entity_indexer

    def compute_entity_scores(self, mode="train"):
        #entity_vertex_embeddings = self.entity_embedding.get_representations()
        #word_embedding_shape = tf.shape(word_embeddings)
        #word_embeddings = tf.reshape(word_embeddings, [-1, self.model_settings["word_embedding_dimension"]])

        #centroid_embeddings = self.sentence_to_graph_mapper.get_forward_embeddings(entity_vertex_embeddings)
        #centroid_embeddings = self.centroid_transformation.transform(centroid_embeddings)
        #word_embeddings += self.sentence_to_graph_mapper.map_backwards(centroid_embeddings)
        #word_embeddings = tf.reshape(word_embeddings, word_embedding_shape)

        if self.model_settings["gate_input"] == "GCN":
            v_features = self.gate_transform.transform(tf.expand_dims(self.hypergraph.get_vertex_scores(),1), mode=mode)
            e_features = self.gate_transform.transform(tf.expand_dims(self.hypergraph.get_event_scores(),1), mode=mode)

            self.hypergraph.update_entity_embeddings(v_features, self.model_settings["gate_input_dim"])
            self.hypergraph.update_event_embeddings(e_features, self.model_settings["gate_input_dim"])

            for gate_layer in self.gate_gcns:
                gate_layer.propagate()

            v_features = self.hypergraph.entity_vertex_embeddings
            e_features = self.hypergraph.event_vertex_embeddings
        else:
            v_features = tf.expand_dims(self.hypergraph.get_vertex_scores(),1)
            e_features = tf.expand_dims(self.hypergraph.get_event_scores(),1)

        self.hypergraph.initialize_zero_embeddings(self.model_settings["entity_embedding_dimension"])
        for hgpu in self.hypergraph_gcn_propagation_units:
            hgpu.set_gate_features(v_features, "entities")
            hgpu.set_gate_features(e_features, "events")
            hgpu.propagate()
        entity_scores = self.hypergraph.entity_vertex_embeddings
        entity_scores = tf.concat([entity_scores, tf.expand_dims(self.hypergraph.get_vertex_scores(),1)], axis=1)

        word_embeddings = self.word_embedding.get_representations(mode=mode)

        ###

        word_embedding_shape = tf.shape(word_embeddings)
        word_embeddings = tf.reshape(word_embeddings, [-1, self.model_settings["word_embedding_dimension"]])

        centroid_embeddings = self.sentence_to_graph_mapper.get_forward_embeddings(tf.ones([tf.shape(entity_scores)[0], 1]))
        word_embeddings = tf.concat([word_embeddings, self.sentence_to_graph_mapper.map_backwards(centroid_embeddings)], axis=1)
        word_embeddings = tf.reshape(word_embeddings, [word_embedding_shape[0],-1,self.model_settings["word_embedding_dimension"]+1])

        ###

        for lstm in self.lstms:
            word_embeddings = lstm.transform_sequences(word_embeddings)
        sentence_vector = self.attention.attend(word_embeddings, mode=mode)

        return self.candidate_scorer.score(sentence_vector, entity_scores, mode=mode)

        #if self.model_settings["use_transformation"]:
        #    bag_of_words = self.transformation.transform(bag_of_words)

        #hidden = self.target_comparator.get_comparison_scores(sentence_vector, entity_scores)
        #entity_scores = tf.squeeze(self.final_transformation.transform(hidden))

        #entity_scores = self.target_comparator.get_comparison_scores(bag_of_words,
        #                                                             entity_scores)

        #return entity_scores
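
The "GCN" gate branch above first widens the scalar vertex and event scores into gate_input_dim features before the gate GCNs refine them. A small NumPy sketch of that widening step, with a single linear layer standing in for the [1, gate_input_dim] gate_transform MLP:

import numpy as np

rng = np.random.default_rng(1)
n_vertices, gate_input_dim = 5, 8

vertex_scores = rng.random(n_vertices)       # like hypergraph.get_vertex_scores()
W = rng.normal(size=(1, gate_input_dim))     # stand-in for gate_transform

# tf.expand_dims(scores, 1) gives (n, 1); the MLP maps it to (n, gate_input_dim).
v_features = vertex_scores[:, None] @ W
print(v_features.shape)                      # (5, 8)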
Example #9
    def initialize_graph(self):
        self.hypergraph = TensorflowHypergraphRepresentation(
            self.variables,
            edge_dropout_rate=self.model_settings["edge_dropout"])
        self.add_component(self.hypergraph)

        self.lstms = [
            BiLstm(self.variables,
                   self.model_settings["word_embedding_dimension"] + 1,
                   self.model_settings["lstm_hidden_state_dimension"],
                   variable_prefix="lstm_" + str(i))
            for i in range(self.model_settings["n_lstms"])
        ]
        for lstm in self.lstms:
            self.add_component(lstm)

        self.word_embedding = SequenceEmbedding(
            self.word_indexer,
            self.variables,
            variable_prefix="word",
            word_dropout_rate=self.model_settings["word_dropout"],
            is_static=self.model_settings["static_word_embeddings"])
        self.add_component(self.word_embedding)

        #self.attention = Attention(self.model_settings["word_embedding_dimension"], self.variables, variable_prefix="attention", strategy="constant_query")
        self.attention = MultiheadAttention(
            self.model_settings["lstm_hidden_state_dimension"],
            self.variables,
            attention_heads=self.model_settings["n_attention_heads"],
            variable_prefix="attention",
            strategy="constant_query",
            attention_dropout=self.model_settings["attention_dropout"])

        self.add_component(self.attention)

        self.target_comparator = TargetComparator(
            self.variables,
            variable_prefix="comparison_to_sentence",
            comparison="concat")
        self.add_component(self.target_comparator)

        self.decoder = SoftmaxDecoder(self.variables,
                                      self.model_settings["loss"])
        self.add_component(self.decoder)

        self.candidate_scorer = NeuralNetworkOrFactorizationScorer(
            self.model_settings, self.variables, variable_prefix="scorer")
        self.add_component(self.candidate_scorer)

        gcn_factory = GcnFactory()
        gcn_settings = {
            "n_layers": self.model_settings["n_layers"],
            "embedding_dimension":
                self.model_settings["entity_embedding_dimension"],
            "n_relation_types": self.facts.number_of_relation_types
        }

        self.hypergraph_gcn_propagation_units = gcn_factory.get_gated_type_only_gcn(
            self.hypergraph, self.variables, gcn_settings)
        for layer in self.hypergraph_gcn_propagation_units:
            self.add_component(layer)

        self.sentence_to_graph_mapper = EmbeddingRetriever(
            self.variables, duplicate_policy="sum", variable_prefix="mapper")
        self.add_component(self.sentence_to_graph_mapper)

        self.final_transformation = MultilayerPerceptron(
            [
                int(self.model_settings["lstm_hidden_state_dimension"] / 2) +
                self.model_settings["entity_embedding_dimension"],
                self.model_settings["nn_hidden_state_dimension"], 1
            ],
            self.variables,
            variable_prefix="transformation",
            l2_scale=self.model_settings["regularization_scale"])
        self.add_component(self.final_transformation)
Example #10
    def initialize_graph(self):
        if not self.model_settings["static_entity_embeddings"]:
            self.entity_embedding = VectorEmbedding(self.entity_indexer,
                                                    self.variables,
                                                    variable_prefix="entity")
            self.add_component(self.entity_embedding)
        else:
            self.entity_embedding = StaticVectorEmbedding(
                self.entity_indexer, self.variables, variable_prefix="entity")
            self.add_component(self.entity_embedding)

        self.hypergraph = TensorflowHypergraphRepresentation(self.variables)
        self.add_component(self.hypergraph)

        self.word_embedding = SequenceEmbedding(self.word_indexer,
                                                self.variables,
                                                variable_prefix="word")
        self.add_component(self.word_embedding)

        self.target_comparator = TargetComparator(
            self.variables,
            variable_prefix="comparison_to_sentence",
            comparison="concat")
        self.add_component(self.target_comparator)

        self.decoder = SoftmaxDecoder(self.variables)
        self.add_component(self.decoder)

        self.hypergraph_gcn_propagation_units = [None] * self.model_settings["n_layers"]
        for layer in range(self.model_settings["n_layers"]):
            self.hypergraph_gcn_propagation_units[
                layer] = HypergraphGcnPropagationUnit(
                    "layer_" + str(layer),
                    self.facts,
                    self.variables,
                    self.model_settings["entity_embedding_dimension"],
                    self.hypergraph,
                    weights="identity",
                    biases="relation_specific",
                    self_weight="identity",
                    self_bias="zero",
                    add_inverse_relations=False)
            self.add_component(self.hypergraph_gcn_propagation_units[layer])

        self.sentence_to_graph_mapper = EmbeddingRetriever(
            self.variables, duplicate_policy="sum", variable_prefix="mapper")
        self.add_component(self.sentence_to_graph_mapper)

        if False:  #self.model_settings["use_transformation"]:
            self.transformation = MultilayerPerceptron(
                [
                    self.model_settings["word_embedding_dimension"],
                    self.model_settings["entity_embedding_dimension"]
                ],
                self.variables,
                variable_prefix="transformation",
                l2_scale=self.model_settings["regularization_scale"])

            self.centroid_transformation = MultilayerPerceptron(
                [
                    self.model_settings["entity_embedding_dimension"],
                    self.model_settings["word_embedding_dimension"]
                ],
                self.variables,
                variable_prefix="centroid_transformation",
                l2_scale=self.model_settings["regularization_scale"])
            self.add_component(self.centroid_transformation)
            self.add_component(self.transformation)

        self.final_transformation = MultilayerPerceptron(
            [
                self.model_settings["word_embedding_dimension"] +
                self.model_settings["entity_embedding_dimension"],
                4 * self.model_settings["entity_embedding_dimension"], 1
            ],
            self.variables,
            variable_prefix="transformation",
            l2_scale=self.model_settings["regularization_scale"])
        self.add_component(self.final_transformation)
Example #11
class PathBagVsBagOfWords(AbstractTensorflowModel):
    def get_preprocessor_stack_types(self):
        preprocessor_stack_types = ["hypergraph", "gold", "sentence"]
        if self.model_settings["static_entity_embeddings"]:
            preprocessor_stack_types += ["static_entity_embeddings"]
        return preprocessor_stack_types

    def initialize_graph(self):
        if not self.model_settings["static_entity_embeddings"]:
            self.entity_embedding = VectorEmbedding(self.entity_indexer,
                                                    self.variables,
                                                    variable_prefix="entity")
            self.add_component(self.entity_embedding)
        else:
            self.entity_embedding = StaticVectorEmbedding(
                self.entity_indexer, self.variables, variable_prefix="entity")
            self.add_component(self.entity_embedding)

        self.hypergraph = TensorflowHypergraphRepresentation(self.variables)
        self.add_component(self.hypergraph)

        self.word_embedding = SequenceEmbedding(self.word_indexer,
                                                self.variables,
                                                variable_prefix="word")
        self.add_component(self.word_embedding)

        self.target_comparator = TargetComparator(
            self.variables,
            variable_prefix="comparison_to_sentence",
            comparison="concat")
        self.add_component(self.target_comparator)

        self.decoder = SoftmaxDecoder(self.variables)
        self.add_component(self.decoder)

        self.hypergraph_gcn_propagation_units = [None] * self.model_settings["n_layers"]
        for layer in range(self.model_settings["n_layers"]):
            self.hypergraph_gcn_propagation_units[
                layer] = HypergraphGcnPropagationUnit(
                    "layer_" + str(layer),
                    self.facts,
                    self.variables,
                    self.model_settings["entity_embedding_dimension"],
                    self.hypergraph,
                    weights="identity",
                    biases="relation_specific",
                    self_weight="identity",
                    self_bias="zero",
                    add_inverse_relations=False)
            self.add_component(self.hypergraph_gcn_propagation_units[layer])

        self.sentence_to_graph_mapper = EmbeddingRetriever(
            self.variables, duplicate_policy="sum", variable_prefix="mapper")
        self.add_component(self.sentence_to_graph_mapper)

        if False:  #self.model_settings["use_transformation"]:
            self.transformation = MultilayerPerceptron(
                [
                    self.model_settings["word_embedding_dimension"],
                    self.model_settings["entity_embedding_dimension"]
                ],
                self.variables,
                variable_prefix="transformation",
                l2_scale=self.model_settings["regularization_scale"])

            self.centroid_transformation = MultilayerPerceptron(
                [
                    self.model_settings["entity_embedding_dimension"],
                    self.model_settings["word_embedding_dimension"]
                ],
                self.variables,
                variable_prefix="centroid_transformation",
                l2_scale=self.model_settings["regularization_scale"])
            self.add_component(self.centroid_transformation)
            self.add_component(self.transformation)

        self.final_transformation = MultilayerPerceptron(
            [
                self.model_settings["word_embedding_dimension"] +
                self.model_settings["entity_embedding_dimension"],
                4 * self.model_settings["entity_embedding_dimension"], 1
            ],
            self.variables,
            variable_prefix="transformation",
            l2_scale=self.model_settings["regularization_scale"])
        self.add_component(self.final_transformation)

    def set_indexers(self, indexers):
        self.word_indexer = indexers.word_indexer
        self.relation_indexer = indexers.relation_indexer
        self.entity_indexer = indexers.entity_indexer

    def compute_entity_scores(self):
        #entity_vertex_embeddings = self.entity_embedding.get_representations()
        word_embeddings = self.word_embedding.get_representations()
        #word_embedding_shape = tf.shape(word_embeddings)
        #word_embeddings = tf.reshape(word_embeddings, [-1, self.model_settings["word_embedding_dimension"]])

        #centroid_embeddings = self.sentence_to_graph_mapper.get_forward_embeddings(entity_vertex_embeddings)
        #centroid_embeddings = self.centroid_transformation.transform(centroid_embeddings)
        #word_embeddings += self.sentence_to_graph_mapper.map_backwards(centroid_embeddings)
        #word_embeddings = tf.reshape(word_embeddings, word_embedding_shape)

        self.hypergraph.initialize_zero_embeddings(
            self.model_settings["entity_embedding_dimension"])
        for hgpu in self.hypergraph_gcn_propagation_units:
            hgpu.propagate()

        entity_scores = self.hypergraph.entity_vertex_embeddings
        bag_of_words = tf.reduce_sum(word_embeddings, 1)

        #if self.model_settings["use_transformation"]:
        #    bag_of_words = self.transformation.transform(bag_of_words)

        hidden = self.target_comparator.get_comparison_scores(
            bag_of_words, entity_scores)
        entity_scores = tf.squeeze(self.final_transformation.transform(hidden))

        #entity_scores = tf.Print(entity_scores, [entity_scores], summarize=25, message="entity_scores: ")

        #entity_scores = self.target_comparator.get_comparison_scores(bag_of_words,
        #                                                             entity_scores)

        return entity_scores
class EntityEmbeddingVsLstm(AbstractTensorflowModel):
    def get_preprocessor_stack_types(self):
        preprocessor_stack_types = ["hypergraph", "gold", "sentence"]
        if self.model_settings["static_entity_embeddings"]:
            preprocessor_stack_types += ["static_entity_embeddings"]
        return preprocessor_stack_types

    def set_indexers(self, indexers):
        self.word_indexer = indexers.word_indexer
        self.entity_indexer = indexers.entity_indexer

    def initialize_graph(self):
        if not self.model_settings["static_entity_embeddings"]:
            self.entity_embedding = VectorEmbedding(self.entity_indexer,
                                                    self.variables,
                                                    variable_prefix="entity")
            self.add_component(self.entity_embedding)
        else:
            self.entity_embedding = StaticVectorEmbedding(
                self.entity_indexer, self.variables, variable_prefix="entity")
            self.add_component(self.entity_embedding)

        self.word_embedding = SequenceEmbedding(self.word_indexer,
                                                self.variables,
                                                variable_prefix="word")
        self.add_component(self.word_embedding)

        self.lstms = [
            BiLstm(self.variables,
                   self.model_settings["word_embedding_dimension"],
                   variable_prefix="lstm_" + str(i))
            for i in range(self.model_settings["n_lstms"])
        ]
        for lstm in self.lstms:
            self.add_component(lstm)

        self.target_comparator = TargetComparator(
            self.variables, variable_prefix="comparison_to_sentence")
        self.add_component(self.target_comparator)

        self.decoder = SoftmaxDecoder(self.variables)
        self.add_component(self.decoder)

        self.sentence_to_graph_mapper = EmbeddingRetriever(
            self.variables, duplicate_policy="sum", variable_prefix="mapper")
        self.add_component(self.sentence_to_graph_mapper)

        if self.model_settings["use_transformation"]:
            self.transformation = MultilayerPerceptron(
                [
                    self.model_settings["word_embedding_dimension"],
                    self.model_settings["entity_embedding_dimension"]
                ],
                self.variables,
                variable_prefix="transformation",
                l2_scale=self.model_settings["regularization_scale"])

            self.centroid_transformation = MultilayerPerceptron(
                [
                    self.model_settings["entity_embedding_dimension"],
                    self.model_settings["word_embedding_dimension"]
                ],
                self.variables,
                variable_prefix="centroid_transformation",
                l2_scale=self.model_settings["regularization_scale"])
            self.add_component(self.centroid_transformation)
            self.add_component(self.transformation)

    def compute_entity_scores(self):
        entity_vertex_embeddings = self.entity_embedding.get_representations()
        word_embeddings = self.word_embedding.get_representations()
        word_embedding_shape = tf.shape(word_embeddings)
        word_embeddings = tf.reshape(
            word_embeddings,
            [-1, self.model_settings["word_embedding_dimension"]])

        centroid_embeddings = self.sentence_to_graph_mapper.get_forward_embeddings(
            entity_vertex_embeddings)
        centroid_embeddings = self.centroid_transformation.transform(
            centroid_embeddings)
        word_embeddings += self.sentence_to_graph_mapper.map_backwards(
            centroid_embeddings)
        word_embeddings = tf.reshape(word_embeddings, word_embedding_shape)

        for lstm in self.lstms:
            word_embeddings = lstm.transform_sequences(word_embeddings)

        # Note: self.lstm_attention is not created in the initialize_graph
        # shown above; it is assumed to be set up elsewhere.
        attention_scores = self.lstm_attention.transform_sequences(
            word_embeddings)
        attention_values = tf.nn.softmax(attention_scores, axis=1)
        attention_weighted_word_scores = word_embeddings * attention_values
        target_vector = tf.reduce_sum(attention_weighted_word_scores, 1)

        if self.model_settings["use_transformation"]:
            target_vector = tf.nn.relu(target_vector)
            target_vector = self.transformation.transform(target_vector)

        entity_scores = self.target_comparator.get_comparison_scores(
            target_vector, entity_vertex_embeddings)

        return entity_scores
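
The pooling at the end of compute_entity_scores above is single-query soft attention: a score per position, a softmax over the sequence axis, and a weighted sum. A self-contained NumPy sketch, with random scores standing in for the lstm_attention output:

import numpy as np

def softmax(x, axis):
    e = np.exp(x - x.max(axis=axis, keepdims=True))
    return e / e.sum(axis=axis, keepdims=True)

rng = np.random.default_rng(2)
batch, seq_len, dim = 2, 7, 16
word_embeddings = rng.normal(size=(batch, seq_len, dim))
attention_scores = rng.normal(size=(batch, seq_len, dim))  # stand-in for lstm_attention

attention_values = softmax(attention_scores, axis=1)       # normalize over positions
target_vector = (word_embeddings * attention_values).sum(axis=1)
print(target_vector.shape)                                 # (2, 16)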
Example #13
class NeuralNetworkOrFactorizationScorer(AbstractComponent):

    scoring_function_type = None
    use_transformation = None

    def __init__(self, model_settings, variables, variable_prefix=""):
        self.model_settings = model_settings

        self.variable_prefix = variable_prefix
        if self.variable_prefix != "":
            self.variable_prefix += "_"

        self.variables = variables

        if self.model_settings["scoring_function"] == "neural_network":
            self.initialize_as_nn()
        elif self.model_settings["scoring_function"] == "factorization":
            self.initialize_as_factorization()

    def initialize_as_nn(self):
        self.scoring_function_type = "neural_network"
        self.final_transformation = MultilayerPerceptron(
            [
                int(self.model_settings["lstm_hidden_state_dimension"] / 2) +
                self.model_settings["entity_embedding_dimension"] +
                self.model_settings["concatenate_scores"],
                self.model_settings["nn_hidden_state_dimension"], 1
            ],
            self.variables,
            variable_prefix=self.variable_prefix + "transformation",
            l2_scale=self.model_settings["regularization_scale"],
            dropout_rate=self.model_settings["transform_dropout"])
        self.target_comparator = TargetComparator(
            self.variables,
            variable_prefix=self.variable_prefix + "comparison_to_sentence",
            comparison="concat")
        self.sub_components = [
            self.target_comparator, self.final_transformation
        ]

    def initialize_as_factorization(self):
        self.scoring_function_type = "factorization"
        self.use_transformation = self.model_settings["use_transformation"]
        self.target_comparator = TargetComparator(
            self.variables,
            variable_prefix=self.variable_prefix + "comparison_to_sentence",
            comparison="dot_product")
        self.sub_components = [self.target_comparator]

        if self.use_transformation:
            self.transformation = MultilayerPerceptron(
                [
                    int(self.model_settings["lstm_hidden_state_dimension"] /
                        2), self.model_settings["entity_embedding_dimension"]
                ],
                self.variables,
                variable_prefix=self.variable_prefix + "transformation",
                l2_scale=self.model_settings["regularization_scale"],
                dropout_rate=self.model_settings["transform_dropout"])

            self.sub_components += [self.transformation]

    def score(self, sentence_embeddings, entity_embeddings, mode="train"):
        if self.scoring_function_type == "neural_network":
            return self.score_nn(sentence_embeddings,
                                 entity_embeddings,
                                 mode=mode)
        else:
            return self.score_factorization(sentence_embeddings,
                                            entity_embeddings,
                                            mode=mode)

    def score_nn(self, sentence_embeddings, entity_embeddings, mode="train"):
        hidden = self.target_comparator.get_comparison_scores(
            sentence_embeddings, entity_embeddings)
        entity_scores = tf.squeeze(
            self.final_transformation.transform(hidden, mode=mode))

        return entity_scores

    def score_factorization(self,
                            sentence_embeddings,
                            entity_embeddings,
                            mode="train"):
        if self.model_settings["use_transformation"]:
            sentence_embeddings = self.transformation.transform(
                sentence_embeddings, mode=mode)

        entity_scores = self.target_comparator.get_comparison_scores(
            sentence_embeddings, entity_embeddings)

        return entity_scores

    def prepare_tensorflow_variables(self, mode="train"):
        for component in self.sub_components:
            component.prepare_tensorflow_variables(mode=mode)

    def get_regularization_term(self):
        reg = 0
        for component in self.sub_components:
            reg += component.get_regularization_term()
        return reg

    def handle_variable_assignment(self, batch, mode):
        for component in self.sub_components:
            component.handle_variable_assignment(batch, mode)
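
Numerically, the two scoring modes above reduce to simple tensor algebra. A hedged NumPy approximation of both branches, with single linear layers standing in for the MLPs and the "concat" comparison assumed to tile the sentence vector against every entity:

import numpy as np

rng = np.random.default_rng(3)
sent_dim, ent_dim, hidden = 32, 30, 64
sentence = rng.normal(size=(sent_dim,))
entities = rng.normal(size=(12, ent_dim))

# "factorization": project the sentence into entity space, then dot-product.
W = rng.normal(size=(sent_dim, ent_dim))
factorization_scores = entities @ (sentence @ W)           # (12,)

# "neural_network": concat sentence with each entity, then a small MLP -> scalar.
W1 = rng.normal(size=(sent_dim + ent_dim, hidden))
w2 = rng.normal(size=(hidden,))
pairs = np.concatenate([np.tile(sentence, (12, 1)), entities], axis=1)
nn_scores = np.maximum(pairs @ W1, 0) @ w2                 # assumes a ReLU hidden layer
print(factorization_scores.shape, nn_scores.shape)         # (12,) (12,)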
Example #14
class EntityEmbeddingVsGold(AbstractTensorflowModel):
    def get_preprocessor_stack_types(self):
        preprocessor_stack_types = ["hypergraph", "gold", "sentence"]
        if self.model_settings["static_entity_embeddings"]:
            preprocessor_stack_types += ["static_entity_embeddings"]
        return preprocessor_stack_types

    def initialize_graph(self):
        if not self.model_settings["static_entity_embeddings"]:
            self.entity_embedding = VectorEmbedding(self.entity_indexer,
                                                    self.variables,
                                                    variable_prefix="entity")
            self.add_component(self.entity_embedding)
        else:
            self.entity_embedding = StaticVectorEmbedding(
                self.entity_indexer, self.variables, variable_prefix="entity")
            self.add_component(self.entity_embedding)

        self.hypergraph = TensorflowHypergraphRepresentation(self.variables)
        self.add_component(self.hypergraph)

        self.mean_gold_embedding_retriever = MeanGoldEmbeddingRetriever(
            self.variables, variable_prefix="gold_lookup")
        self.add_component(self.mean_gold_embedding_retriever)

        #self.question_sentence = TensorflowSentenceRepresentation(self.variables)
        #self.add_component(self.question_sentence)

        #self.word_embedding = SequenceEmbedding(self.word_indexer, self.variables, variable_prefix="word")
        #self.add_component(self.word_embedding)

        self.target_comparator = TargetComparator(
            self.variables,
            variable_prefix="comparison_to_sentence",
            comparison="concat")
        self.add_component(self.target_comparator)

        self.decoder = SoftmaxDecoder(self.variables)
        self.add_component(self.decoder)

        self.sentence_to_graph_mapper = EmbeddingRetriever(
            self.variables, duplicate_policy="sum", variable_prefix="mapper")
        self.add_component(self.sentence_to_graph_mapper)

        self.transformation = MultilayerPerceptron(
            [
                self.model_settings["entity_embedding_dimension"],
                self.model_settings["entity_embedding_dimension"]
            ],
            self.variables,
            variable_prefix="transformation",
            l2_scale=self.model_settings["regularization_scale"])
        self.add_component(self.transformation)

        # A distinct prefix avoids variable-name collisions with the
        # "transformation" component above.
        self.vertex_transformation = MultilayerPerceptron(
            [
                self.model_settings["entity_embedding_dimension"],
                self.model_settings["entity_embedding_dimension"]
            ],
            self.variables,
            variable_prefix="vertex_transformation",
            l2_scale=self.model_settings["regularization_scale"])
        self.add_component(self.vertex_transformation)

        self.final_transformation = MultilayerPerceptron(
            [
                2 * self.model_settings["entity_embedding_dimension"],
                4 * self.model_settings["entity_embedding_dimension"], 1
            ],
            self.variables,
            variable_prefix="transformation",
            l2_scale=self.model_settings["regularization_scale"])
        self.add_component(self.final_transformation)

    def set_indexers(self, indexers):
        self.entity_indexer = indexers.entity_indexer

    def compute_entity_scores(self):
        self.hypergraph.entity_vertex_embeddings = \
            self.entity_embedding.get_representations()
        # Debugging trace: tf.Print passes the tensor through unchanged while
        # logging its contents.
        self.hypergraph.entity_vertex_embeddings = tf.Print(
            self.hypergraph.entity_vertex_embeddings,
            [self.hypergraph.entity_vertex_embeddings],
            message="embeddings",
            summarize=100)
        gold_embeddings = self.mean_gold_embedding_retriever.get_representations(
            self.hypergraph.entity_vertex_embeddings)
        #gold_embeddings = tf.Print(gold_embeddings, [gold_embeddings], message="Gold: ", summarize=5)

        #gold_embeddings = self.transformation.transform(gold_embeddings)
        vertex_embeddings = self.hypergraph.entity_vertex_embeddings  #self.vertex_transformation.transform(self.hypergraph.entity_vertex_embeddings)

        #gold_embeddings = tf.Print(gold_embeddings, [self.hypergraph.entity_vertex_embeddings], message="Vertices: ", summarize=100)

        hidden = self.target_comparator.get_comparison_scores(
            gold_embeddings, vertex_embeddings)
        entity_scores = tf.squeeze(self.final_transformation.transform(hidden))

        entity_scores = tf.Print(entity_scores, [entity_scores],
                                 summarize=25,
                                 message="entity_scores: ")

        #entity_scores = tf.Print(entity_scores, [entity_scores], message="Scores: ", summarize=25)

        return entity_scores
Example #15
class MultiheadAttention(AbstractComponent):

    query = None
    strategy = None
    heads = None
    input_dimension = None
    variable_prefix = None
    variables = None
    attention_dropout = None

    def __init__(self,
                 input_dimension,
                 variables,
                 attention_heads=1,
                 variable_prefix="",
                 strategy=None,
                 attention_dropout=0.0):
        self.strategy = strategy
        self.input_dimension = input_dimension
        self.heads = attention_heads

        self.variable_prefix = variable_prefix
        if self.variable_prefix != "":
            self.variable_prefix += "_"

        self.variables = variables

        self.linear_key = MultilayerPerceptron(
            [int(0.5 * self.input_dimension),
             int(0.5 * self.input_dimension)], self.variables,
            self.variable_prefix + "key_transform")
        self.linear_value = MultilayerPerceptron(
            [int(0.5 * self.input_dimension),
             int(0.5 * self.input_dimension)], self.variables,
            self.variable_prefix + "value_transform")
        self.attention_dropout = attention_dropout

    def attend(self, padded_sequence_matrix, mode="train"):
        key_matrix, value_matrix = tf.split(
            padded_sequence_matrix,
            [int(0.5 * self.input_dimension),
             int(0.5 * self.input_dimension)], 2)
        previous_shape = tf.shape(key_matrix)

        transformed_key = self.linear_key.transform(
            tf.reshape(key_matrix,
                       [previous_shape[0] * previous_shape[1], -1]))
        transformed_value = self.linear_value.transform(
            tf.reshape(value_matrix,
                       [previous_shape[0] * previous_shape[1], -1]))

        dim = int(0.5 * self.input_dimension / self.heads)
        transformed_key = tf.reshape(
            transformed_key,
            [previous_shape[0], self.heads, previous_shape[1], dim])
        transformed_value = tf.reshape(
            transformed_value,
            [previous_shape[0], self.heads, previous_shape[1], dim])
        norm_factor = np.sqrt(dim)

        attention_weights = tf.nn.softmax(
            tf.reduce_sum(transformed_key * self.query, axis=3) / norm_factor,
            axis=-1)
        #attention_weights = tf.Print(attention_weights, [attention_weights], message="attention_weights", summarize=100)
        attention_weights = tf.expand_dims(attention_weights, 3)

        if mode == "train" and self.attention_dropout > 0.0:
            attention_weights = tf.nn.dropout(attention_weights,
                                              1 - self.attention_dropout)

        attention_weighted_matrix = transformed_value * attention_weights

        weighted_value_matrix = tf.reduce_sum(attention_weighted_matrix, 2)
        return_value = tf.reshape(weighted_value_matrix,
                                  [previous_shape[0], -1])

        return return_value

    def prepare_tensorflow_variables(self, mode="train"):
        weight_initializer = np.random.uniform(
            -0.1,
            0.1,
            size=(self.heads, 1, int(0.5 * self.input_dimension /
                                     self.heads))).astype(np.float32)
        self.query = tf.Variable(weight_initializer,
                                 name=self.variable_prefix + "query")

        self.linear_key.prepare_tensorflow_variables(mode=mode)
        self.linear_value.prepare_tensorflow_variables(mode=mode)
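
To make the reshapes in attend() concrete, here is a self-contained NumPy re-implementation of the same constant-query attention, with random matrices standing in for the trained linear_key/linear_value MLPs and the query variable; it follows the tensor shapes of the code above step for step:

import numpy as np

def softmax(x, axis=-1):
    e = np.exp(x - x.max(axis=axis, keepdims=True))
    return e / e.sum(axis=axis, keepdims=True)

rng = np.random.default_rng(4)
batch, seq_len, input_dim, heads = 2, 7, 64, 4
half, dim = input_dim // 2, input_dim // 2 // heads

sequence = rng.normal(size=(batch, seq_len, input_dim))
Wk = rng.normal(size=(half, half))            # stand-in for linear_key
Wv = rng.normal(size=(half, half))            # stand-in for linear_value
query = rng.uniform(-0.1, 0.1, size=(heads, 1, dim))

keys, values = sequence[..., :half], sequence[..., half:]   # tf.split(..., 2)
k = (keys.reshape(-1, half) @ Wk).reshape(batch, heads, seq_len, dim)
v = (values.reshape(-1, half) @ Wv).reshape(batch, heads, seq_len, dim)

# Constant query: one learned vector per head, broadcast over all positions.
weights = softmax((k * query).sum(axis=3) / np.sqrt(dim), axis=-1)
pooled = (v * weights[..., None]).sum(axis=2)               # (batch, heads, dim)
print(pooled.reshape(batch, -1).shape)                      # (2, 32)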