Example #1
    def initialize_graph(self):
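        # Entity embeddings are trainable by default; a static (frozen)
        # variant is used when "static_entity_embeddings" is set.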
        if not self.model_settings["static_entity_embeddings"]:
            self.entity_embedding = VectorEmbedding(self.entity_indexer,
                                                    self.variables,
                                                    variable_prefix="entity")
            self.add_component(self.entity_embedding)
        else:
            self.entity_embedding = StaticVectorEmbedding(
                self.entity_indexer, self.variables, variable_prefix="entity")
            self.add_component(self.entity_embedding)

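        # Embedding lookup for the words of the input sentence.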
        self.word_embedding = SequenceEmbedding(self.word_indexer,
                                                self.variables,
                                                variable_prefix="word")
        self.add_component(self.word_embedding)

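        # Stack of "n_lstms" bidirectional LSTM encoders over the word embeddings.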
        self.lstms = [
            BiLstm(self.variables,
                   self.model_settings["word_embedding_dimension"],
                   variable_prefix="lstm_" + str(i))
            for i in range(self.model_settings["n_lstms"])
        ]
        for lstm in self.lstms:
            self.add_component(lstm)

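        # Comparator between candidate targets and the sentence representation.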
        self.target_comparator = TargetComparator(
            self.variables, variable_prefix="comparison_to_sentence")
        self.add_component(self.target_comparator)

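        # Softmax decoder producing the output distribution.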
        self.decoder = SoftmaxDecoder(self.variables)
        self.add_component(self.decoder)

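        # Retrieves entity embeddings for sentence positions; duplicate
        # matches are summed (duplicate_policy="sum").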
        self.sentence_to_graph_mapper = EmbeddingRetriever(
            self.variables, duplicate_policy="sum", variable_prefix="mapper")
        self.add_component(self.sentence_to_graph_mapper)

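        # Optional MLPs mapping word-space vectors into entity space, and
        # entity-space centroids back into word space.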
        if self.model_settings["use_transformation"]:
            self.transformation = MultilayerPerceptron(
                [
                    self.model_settings["word_embedding_dimension"],
                    self.model_settings["entity_embedding_dimension"]
                ],
                self.variables,
                variable_prefix="transformation",
                l2_scale=self.model_settings["regularization_scale"])

            self.centroid_transformation = MultilayerPerceptron(
                [
                    self.model_settings["entity_embedding_dimension"],
                    self.model_settings["word_embedding_dimension"]
                ],
                self.variables,
                variable_prefix="centroid_transformation",
                l2_scale=self.model_settings["regularization_scale"])
            self.add_component(self.centroid_transformation)
            self.add_component(self.transformation)
Example #2
    def initialize_graph(self):
        if not self.model_settings["static_entity_embeddings"]:
            self.entity_embedding = VectorEmbedding(self.entity_indexer,
                                                    self.variables,
                                                    variable_prefix="entity")
            self.add_component(self.entity_embedding)
        else:
            self.entity_embedding = StaticVectorEmbedding(
                self.entity_indexer, self.variables, variable_prefix="entity")
            self.add_component(self.entity_embedding)

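        # TensorFlow representation of the hypergraph.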
        self.hypergraph = TensorflowHypergraphRepresentation(self.variables)
        self.add_component(self.hypergraph)

        #self.question_sentence = TensorflowSentenceRepresentation(self.variables)
        #self.add_component(self.question_sentence)

        self.word_embedding = SequenceEmbedding(self.word_indexer,
                                                self.variables,
                                                variable_prefix="word")
        self.add_component(self.word_embedding)

        self.target_comparator = TargetComparator(
            self.variables, variable_prefix="comparison_to_sentence")
        self.add_component(self.target_comparator)

        self.decoder = SoftmaxDecoder(self.variables)
        self.add_component(self.decoder)

        self.sentence_to_graph_mapper = EmbeddingRetriever(
            self.variables, duplicate_policy="sum", variable_prefix="mapper")
        self.add_component(self.sentence_to_graph_mapper)

        if self.model_settings["use_transformation"]:
            self.transformation = MultilayerPerceptron(
                [
                    self.model_settings["word_embedding_dimension"],
                    self.model_settings["entity_embedding_dimension"]
                ],
                self.variables,
                variable_prefix="transformation",
                l2_scale=self.model_settings["regularization_scale"])

            self.centroid_transformation = MultilayerPerceptron(
                [
                    self.model_settings["entity_embedding_dimension"],
                    self.model_settings["word_embedding_dimension"]
                ],
                self.variables,
                variable_prefix="centroid_transformation",
                l2_scale=self.model_settings["regularization_scale"])
            self.add_component(self.centroid_transformation)
            self.add_component(self.transformation)
Example #3
    def initialize_graph(self):
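        # Hypergraph representation, here with edge dropout for regularization.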
        self.hypergraph = TensorflowHypergraphRepresentation(
            self.variables,
            edge_dropout_rate=self.model_settings["edge_dropout"])
        self.add_component(self.hypergraph)

        self.lstms = [
            BiLstm(self.variables,
                   self.model_settings["word_embedding_dimension"] + 1,
                   self.model_settings["lstm_hidden_state_dimension"],
                   variable_prefix="lstm_" + str(i))
            for i in range(self.model_settings["n_lstms"])
        ]
        for lstm in self.lstms:
            self.add_component(lstm)

        self.word_embedding = SequenceEmbedding(
            self.word_indexer,
            self.variables,
            variable_prefix="word",
            word_dropout_rate=self.model_settings["word_dropout"],
            is_static=self.model_settings["static_word_embeddings"])
        self.add_component(self.word_embedding)

        #self.attention = Attention(self.model_settings["word_embedding_dimension"], self.variables, variable_prefix="attention", strategy="constant_query")
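        # Multi-head attention with a constant-query strategy and attention dropout.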
        self.attention = MultiheadAttention(
            self.model_settings["lstm_hidden_state_dimension"],
            self.variables,
            attention_heads=4,
            variable_prefix="attention",
            strategy="constant_query",
            attention_dropout=self.model_settings["attention_dropout"])

        self.add_component(self.attention)

        self.target_comparator = TargetComparator(
            self.variables,
            variable_prefix="comparison_to_sentence",
            comparison="concat")
        self.add_component(self.target_comparator)

        self.decoder = SoftmaxDecoder(self.variables,
                                      self.model_settings["loss"])
        self.add_component(self.decoder)

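        # Scores candidates with either a neural network or a factorization
        # model, depending on the model settings.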
        self.candidate_scorer = NeuralNetworkOrFactorizationScorer(
            self.model_settings, self.variables, variable_prefix="scorer")
        self.add_component(self.candidate_scorer)

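        # One hypergraph GCN propagation unit per layer: identity weights,
        # relation-specific biases, and inverse relations included.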
        self.hypergraph_gcn_propagation_units = [
            None
        ] * self.model_settings["n_layers"]
        for layer in range(self.model_settings["n_layers"]):
            self.hypergraph_gcn_propagation_units[
                layer] = HypergraphGcnPropagationUnit(
                    "layer_" + str(layer),
                    self.facts,
                    self.variables,
                    self.model_settings["entity_embedding_dimension"],
                    self.hypergraph,
                    weights="identity",
                    biases="relation_specific",
                    self_weight="identity",
                    self_bias="zero",
                    add_inverse_relations=True)
            self.add_component(self.hypergraph_gcn_propagation_units[layer])

        self.sentence_to_graph_mapper = EmbeddingRetriever(
            self.variables, duplicate_policy="sum", variable_prefix="mapper")
        self.add_component(self.sentence_to_graph_mapper)

        self.final_transformation = MultilayerPerceptron(
            [
                int(self.model_settings["lstm_hidden_state_dimension"] / 2) +
                self.model_settings["entity_embedding_dimension"],
                self.model_settings["nn_hidden_state_dimension"], 1
            ],
            self.variables,
            variable_prefix="transformation",
            l2_scale=self.model_settings["regularization_scale"])
        self.add_component(self.final_transformation)
Example #4
    def initialize_graph(self):
        self.hypergraph = TensorflowHypergraphRepresentation(
            self.variables,
            edge_dropout_rate=self.model_settings["edge_dropout"])
        self.add_component(self.hypergraph)

        self.lstms = [
            BiLstm(self.variables,
                   self.model_settings["word_embedding_dimension"] + 1,
                   self.model_settings["lstm_hidden_state_dimension"],
                   variable_prefix="lstm_" + str(i))
            for i in range(self.model_settings["n_lstms"])
        ]
        for lstm in self.lstms:
            self.add_component(lstm)

        self.word_embedding = SequenceEmbedding(
            self.word_indexer,
            self.variables,
            variable_prefix="word",
            word_dropout_rate=self.model_settings["word_dropout"],
            is_static=self.model_settings["static_word_embeddings"])
        self.add_component(self.word_embedding)

        #self.attention = Attention(self.model_settings["word_embedding_dimension"], self.variables, variable_prefix="attention", strategy="constant_query")
        self.attention = MultiheadAttention(
            self.model_settings["lstm_hidden_state_dimension"],
            self.variables,
            attention_heads=self.model_settings["n_attention_heads"],
            variable_prefix="attention",
            strategy="constant_query",
            attention_dropout=self.model_settings["attention_dropout"])

        self.add_component(self.attention)

        self.target_comparator = TargetComparator(
            self.variables,
            variable_prefix="comparison_to_sentence",
            comparison="concat")
        self.add_component(self.target_comparator)

        self.decoder = SoftmaxDecoder(self.variables,
                                      self.model_settings["loss"])
        self.add_component(self.decoder)

        self.candidate_scorer = NeuralNetworkOrFactorizationScorer(
            self.model_settings, self.variables, variable_prefix="scorer")
        self.add_component(self.candidate_scorer)

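        # Build the gated, type-only GCN layers through a factory.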
        gcn_factory = GcnFactory()
        gcn_settings = {
            "n_layers": self.model_settings["n_layers"],
            "embedding_dimension":
                self.model_settings["entity_embedding_dimension"],
            "n_relation_types": self.facts.number_of_relation_types
        }

        self.hypergraph_gcn_propagation_units = gcn_factory.get_gated_type_only_gcn(
            self.hypergraph, self.variables, gcn_settings)
        for layer in self.hypergraph_gcn_propagation_units:
            self.add_component(layer)

        self.sentence_to_graph_mapper = EmbeddingRetriever(
            self.variables, duplicate_policy="sum", variable_prefix="mapper")
        self.add_component(self.sentence_to_graph_mapper)

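        # Final MLP mapping the concatenated sentence and entity features
        # through one hidden layer to a scalar score.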
        self.final_transformation = MultilayerPerceptron(
            [
                int(self.model_settings["lstm_hidden_state_dimension"] / 2) +
                self.model_settings["entity_embedding_dimension"],
                self.model_settings["nn_hidden_state_dimension"], 1
            ],
            self.variables,
            variable_prefix="transformation",
            l2_scale=self.model_settings["regularization_scale"])
        self.add_component(self.final_transformation)
Example #5
    def initialize_graph(self):
        if not self.model_settings["static_entity_embeddings"]:
            self.entity_embedding = VectorEmbedding(self.entity_indexer,
                                                    self.variables,
                                                    variable_prefix="entity")
            self.add_component(self.entity_embedding)
        else:
            self.entity_embedding = StaticVectorEmbedding(
                self.entity_indexer, self.variables, variable_prefix="entity")
            self.add_component(self.entity_embedding)

        self.hypergraph = TensorflowHypergraphRepresentation(self.variables)
        self.add_component(self.hypergraph)

        self.word_embedding = SequenceEmbedding(self.word_indexer,
                                                self.variables,
                                                variable_prefix="word")
        self.add_component(self.word_embedding)

        self.target_comparator = TargetComparator(
            self.variables,
            variable_prefix="comparison_to_sentence",
            comparison="concat")
        self.add_component(self.target_comparator)

        self.decoder = SoftmaxDecoder(self.variables)
        self.add_component(self.decoder)

        self.hypergraph_gcn_propagation_units = [
            None
        ] * self.model_settings["n_layers"]
        for layer in range(self.model_settings["n_layers"]):
            self.hypergraph_gcn_propagation_units[
                layer] = HypergraphGcnPropagationUnit(
                    "layer_" + str(layer),
                    self.facts,
                    self.variables,
                    self.model_settings["entity_embedding_dimension"],
                    self.hypergraph,
                    weights="identity",
                    biases="relation_specific",
                    self_weight="identity",
                    self_bias="zero",
                    add_inverse_relations=False)
            self.add_component(self.hypergraph_gcn_propagation_units[layer])

        self.sentence_to_graph_mapper = EmbeddingRetriever(
            self.variables, duplicate_policy="sum", variable_prefix="mapper")
        self.add_component(self.sentence_to_graph_mapper)

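        # Disabled code path: word/entity transformation MLPs kept for reference.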
        if False:  #self.model_settings["use_transformation"]:
            self.transformation = MultilayerPerceptron(
                [
                    self.model_settings["word_embedding_dimension"],
                    self.model_settings["entity_embedding_dimension"]
                ],
                self.variables,
                variable_prefix="transformation",
                l2_scale=self.model_settings["regularization_scale"])

            self.centroid_transformation = MultilayerPerceptron(
                [
                    self.model_settings["entity_embedding_dimension"],
                    self.model_settings["word_embedding_dimension"]
                ],
                self.variables,
                variable_prefix="centroid_transformation",
                l2_scale=self.model_settings["regularization_scale"])
            self.add_component(self.centroid_transformation)
            self.add_component(self.transformation)

        self.final_transformation = MultilayerPerceptron(
            [
                self.model_settings["word_embedding_dimension"] +
                self.model_settings["entity_embedding_dimension"],
                4 * self.model_settings["entity_embedding_dimension"], 1
            ],
            self.variables,
            variable_prefix="transformation",
            l2_scale=self.model_settings["regularization_scale"])
        self.add_component(self.final_transformation)
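
All five variants share one composition pattern: every sub-module is registered through add_component, so the enclosing model can treat its parts uniformly. The sketch below illustrates that pattern in isolation; Component, Model, and prepare_variables are hypothetical stand-ins for illustration, not the repository's actual classes.

# Minimal sketch of the component-registration pattern, under the assumption
# that each component exposes its own variable-preparation hook. All names
# here (Component, Model, prepare_variables) are illustrative only.
class Component:
    def prepare_variables(self, mode="train"):
        # Each concrete component would create its variables here.
        pass

class Model:
    def __init__(self):
        self.components = []

    def add_component(self, component):
        # Registration lets the model iterate over all sub-modules later,
        # e.g. to build variables or collect regularization terms.
        self.components.append(component)

    def prepare_variables(self, mode="train"):
        for component in self.components:
            component.prepare_variables(mode=mode)

model = Model()
model.add_component(Component())
model.prepare_variables(mode="train")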