    def compute_task_output_new(
        self,
        batch_features: Dict[str, tf.Tensor],
        final_node_representations: tf.Tensor,
        batch_features_2: Dict[str, tf.Tensor],
        final_node_representations_2: tf.Tensor,
        training: bool,
    ) -> Any:
        per_graph_results_1 = self._node_to_graph_repr_layer(
            NodesToGraphRepresentationInput(
                node_embeddings=tf.concat(
                    [batch_features["node_features"], final_node_representations], axis=-1
                ),
                node_to_graph_map=batch_features["node_to_graph_map"],
                num_graphs=batch_features["num_graphs_in_batch"],
            )
        )  # Shape [G, graph_aggregation_num_heads]
        # second graph
        per_graph_results_2 = self._node_to_graph_repr_layer(
            NodesToGraphRepresentationInput(
                node_embeddings=tf.concat(
                    [batch_features_2["node_features"], final_node_representations_2], axis=-1
                ),
                node_to_graph_map=batch_features_2["node_to_graph_map"],
                num_graphs=batch_features_2["num_graphs_in_batch"],
            )
        )  # Shape [G, graph_aggregation_num_heads]
        # Concatenate the two per-graph representations -> Shape [G, 2 * graph_aggregation_num_heads]
        per_graph_results_all = tf.concat([per_graph_results_1, per_graph_results_2], axis=1)

        per_graph_results = self._graph_repr_to_classification_layer(
            per_graph_results_all
        )  # Shape [G, 1]

        return tf.squeeze(per_graph_results, axis=-1)
    def build(self, input_shapes):
        with tf.name_scope(self._name):
            self._node_to_graph_repr_layer = WeightedSumGraphRepresentation(
                graph_representation_size=self._params["graph_aggregation_num_heads"],
                num_heads=self._params["graph_aggregation_num_heads"],
                scoring_mlp_layers=self._params["graph_aggregation_hidden_layers"],
                scoring_mlp_dropout_rate=self._params["graph_aggregation_dropout_rate"],
                transformation_mlp_layers=self._params["graph_aggregation_hidden_layers"],
                transformation_mlp_dropout_rate=self._params["graph_aggregation_dropout_rate"],
            )
            self._node_to_graph_repr_layer.build(
                NodesToGraphRepresentationInput(
                    node_embeddings=tf.TensorShape(
                        (None, input_shapes["node_features"][-1] + self._params["gnn_hidden_dim"])
                    ),
                    node_to_graph_map=tf.TensorShape((None,)),  # rank-1 shape; (None) would mean unknown rank
                    num_graphs=tf.TensorShape(()),
                )
            )

            self._graph_repr_to_classification_layer = tf.keras.layers.Dense(
                units=1, activation=tf.nn.sigmoid, use_bias=True
            )
            # compute_task_output_new concatenates two graph representations,
            # so the classification layer sees twice the aggregation width:
            self._graph_repr_to_classification_layer.build(
                tf.TensorShape((None, 2 * self._params["graph_aggregation_num_heads"]))
            )

        super().build(input_shapes)
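
These build methods read the graph-aggregation settings from self._params. A minimal sketch of such a dictionary, with invented values for illustration only:

# Hypothetical hyperparameters (invented values, not from the source):
params = {
    "gnn_hidden_dim": 128,
    "graph_aggregation_num_heads": 16,
    "graph_aggregation_hidden_layers": [128],
    "graph_aggregation_dropout_rate": 0.2,
}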
Example 3
    def build(self, input_shapes):
        if self._params["use_intermediate_gnn_results"]:
            # We get the initial GNN input + results for all layers:
            node_repr_size = (input_shapes["node_features"][-1] +
                              self._params["gnn_hidden_dim"] *
                              self._params["gnn_num_layers"])
        else:
            node_repr_size = (input_shapes["node_features"][-1] +
                              self._params["gnn_hidden_dim"])

        node_to_graph_repr_input = NodesToGraphRepresentationInput(
            node_embeddings=tf.TensorShape((None, node_repr_size)),
            node_to_graph_map=tf.TensorShape((None,)),
            num_graphs=tf.TensorShape(()),
        )

        with tf.name_scope(self.__class__.__name__):
            with tf.name_scope("graph_representation_computation"):
                with tf.name_scope("weighted_avg"):
                    self._weighted_avg_of_nodes_to_graph_repr.build(
                        node_to_graph_repr_input)
                with tf.name_scope("weighted_sum"):
                    self._weighted_sum_of_nodes_to_graph_repr.build(
                        node_to_graph_repr_input)

            self._regression_mlp.build(
                tf.TensorShape(
                    (None, 2 * self._params["graph_aggregation_output_size"])))

        super().build(input_shapes)
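
To make the node-representation size concrete, a small sketch of the arithmetic above under assumed hyperparameters (the values are invented):

# Invented values for illustration.
node_feature_dim = 64          # input_shapes["node_features"][-1]
gnn_hidden_dim = 128
gnn_num_layers = 4

# With use_intermediate_gnn_results: initial features + one state per layer.
print(node_feature_dim + gnn_hidden_dim * gnn_num_layers)  # 576
# Without: initial features + the final state only.
print(node_feature_dim + gnn_hidden_dim)                   # 192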
Example 4
    def build(self, input_shapes):
        with tf.name_scope(self.__class__.__name__):
            self._node_to_graph_repr_layer = WeightedSumGraphRepresentation(
                graph_representation_size=self._params["graph_aggregation_num_heads"],
                num_heads=self._params["graph_aggregation_num_heads"],
                scoring_mlp_layers=self._params["graph_aggregation_hidden_layers"],
                scoring_mlp_dropout_rate=self._params["graph_aggregation_dropout_rate"],
                transformation_mlp_layers=self._params["graph_aggregation_hidden_layers"],
                transformation_mlp_dropout_rate=self._params["graph_aggregation_dropout_rate"],
            )
            self._node_to_graph_repr_layer.build(
                NodesToGraphRepresentationInput(
                    node_embeddings=tf.TensorShape(
                        (None, input_shapes["node_features"][-1] + self._params["gnn_hidden_dim"])
                    ),
                    node_to_graph_map=tf.TensorShape((None,)),
                    num_graphs=tf.TensorShape(()),
                )
            )

        super().build(input_shapes)
Example 5
    def build(self, input_shapes: Dict[str, Any]):
        graph_params = {
            name[4:]: value for name, value in self._params.items() if name.startswith("gnn_")
        }
        self.embedding = tf.keras.layers.Embedding(self.vocab_size, self._params["token_embedding_size"])
        self._gnn = GNN(graph_params)
        self._gnn.build(
            GNNInput(
                node_features=self.get_initial_node_feature_shape(input_shapes),
                adjacency_lists=tuple(
                    input_shapes[f"adjacency_list_{edge_type_idx}"]
                    for edge_type_idx in range(self._num_edge_types)
                ),
                node_to_graph_map=tf.TensorShape((None,)),
                num_graphs=tf.TensorShape(()),
            )
        )

        with tf.name_scope(self._name):
            self._node_to_graph_repr_layer = WeightedSumGraphRepresentation(
                graph_representation_size=self._params["graph_aggregation_size"],
                num_heads=self._params["graph_aggregation_num_heads"],
                scoring_mlp_layers=self._params["graph_aggregation_hidden_layers"],
                scoring_mlp_dropout_rate=self._params["graph_aggregation_dropout_rate"],
                transformation_mlp_layers=self._params["graph_aggregation_hidden_layers"],
                transformation_mlp_dropout_rate=self._params["graph_aggregation_dropout_rate"],
            )
            self._node_to_graph_repr_layer.build(
                NodesToGraphRepresentationInput(
                    node_embeddings=tf.TensorShape(
                        (None, input_shapes["node_features"][-1] + self._params["gnn_hidden_dim"])
                    ),
                    node_to_graph_map=tf.TensorShape((None,)),
                    num_graphs=tf.TensorShape(()),
                )
            )

            self._graph_repr_layer = tf.keras.layers.Dense(
                self._params["graph_encoding_size"], use_bias=True
            )
            self._graph_repr_layer.build(
                tf.TensorShape((None, self._params["graph_aggregation_size"]))
            )

        super().build([])
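
The graph_params comprehension above strips the "gnn_" prefix so the nested GNN sees plain names; a quick sketch with hypothetical values:

# Hypothetical task parameters; only "gnn_"-prefixed keys reach the GNN.
params = {"gnn_hidden_dim": 128, "gnn_num_layers": 4, "graph_aggregation_size": 32}
graph_params = {name[4:]: value for name, value in params.items() if name.startswith("gnn_")}
print(graph_params)  # {'hidden_dim': 128, 'num_layers': 4}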
Example 6
    def compute_task_output(
        self,
        batch_features: Dict[str, tf.Tensor],
        final_node_representations: tf.Tensor,
        training: bool,
    ) -> Any:
        per_graph_results = self._node_to_graph_repr_layer(
            NodesToGraphRepresentationInput(
                node_embeddings=tf.concat(
                    [batch_features["node_features"], final_node_representations], axis=-1
                ),
                node_to_graph_map=batch_features["node_to_graph_map"],
                num_graphs=batch_features["num_graphs_in_batch"],
            )
        )  # Shape [G, graph_aggregation_num_heads]
        per_graph_results = tf.reduce_sum(per_graph_results, axis=-1)  # Shape [G]

        return per_graph_results
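
For reference, a toy run of the final reduction: the aggregation layer emits one dimension per head (here the representation size equals the head count), and tf.reduce_sum collapses them into a single scalar per graph. The values below are made up:

import tensorflow as tf

per_graph_results = tf.constant([[0.1, 0.2, 0.3, 0.4],
                                 [1.0, 1.0, 1.0, 1.0]])   # [G=2, heads=4]
print(tf.reduce_sum(per_graph_results, axis=-1))  # tf.Tensor([1. 4.], shape=(2,), dtype=float32)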
Example 7
    def _compute_per_node_graph_representations(
            self, inputs: GraphGlobalExchangeInput, training: bool = False):
        cur_graph_representations = self._node_to_graph_representation_layer(
            NodesToGraphRepresentationInput(
                node_embeddings=inputs.node_embeddings,
                node_to_graph_map=inputs.node_to_graph_map,
                num_graphs=inputs.num_graphs,
            ),
            training=training,
        )  # Shape [G, hidden_dim]

        per_node_graph_representations = gather_dense_gradient(
            cur_graph_representations,
            inputs.node_to_graph_map)  # Shape [V, hidden_dim]

        if training:
            per_node_graph_representations = tf.nn.dropout(
                per_node_graph_representations, rate=self._dropout_rate)

        return per_node_graph_representations
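
gather_dense_gradient presumably gathers rows like tf.gather while keeping the gradient dense; a toy sketch of the broadcast it performs here, using plain tf.gather for illustration (values invented):

import tensorflow as tf

graph_reprs = tf.constant([[1.0, 2.0],
                           [3.0, 4.0]])         # Shape [G=2, hidden_dim=2]
node_to_graph_map = tf.constant([0, 0, 1])      # Shape [V=3]

per_node = tf.gather(graph_reprs, node_to_graph_map)  # Shape [V, hidden_dim]
print(per_node.numpy())  # [[1. 2.] [1. 2.] [3. 4.]]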
Example 8
    def compute_task_output(
        self,
        batch_features: Dict[str, tf.Tensor],
        final_node_representations: Union[tf.Tensor, Tuple[tf.Tensor, List[tf.Tensor]]],
        training: bool,
    ) -> Any:
        if self._params["use_intermediate_gnn_results"]:
            _, intermediate_node_representations = final_node_representations
            # We want to skip the first "intermediate" representation, which is the output of
            # the initial feature -> GNN input layer:
            node_representations = tf.concat(
                (batch_features["node_features"],)
                + intermediate_node_representations[1:],
                axis=-1,
            )
        else:
            node_representations = tf.concat(
                [batch_features["node_features"], final_node_representations], axis=-1
            )

        graph_representation_layer_input = NodesToGraphRepresentationInput(
            node_embeddings=node_representations,
            node_to_graph_map=batch_features["node_to_graph_map"],
            num_graphs=batch_features["num_graphs_in_batch"],
        )
        weighted_avg_graph_repr = self._weighted_avg_of_nodes_to_graph_repr(
            graph_representation_layer_input, training=training
        )
        weighted_sum_graph_repr = self._weighted_sum_of_nodes_to_graph_repr(
            graph_representation_layer_input, training=training
        )

        graph_representations = tf.concat(
            [weighted_avg_graph_repr, weighted_sum_graph_repr], axis=-1
        )  # shape: [G, GD]

        per_graph_results = self._regression_mlp(
            graph_representations, training=training
        )  # shape: [G, 1]

        return tf.squeeze(per_graph_results, axis=-1)
Example 9
    def build(self, tensor_shapes: GraphGlobalExchangeInput):
        """Build the various layers in the model.

        Args:
            tensor_shapes: A GraphGlobalExchangeInput of tensor shapes.

        Returns:
            Nothing, but initialises the layers in the model based on the tensor shapes given.
        """
        self._node_to_graph_representation_layer = WeightedSumGraphRepresentation(
            graph_representation_size=self._hidden_dim,
            weighting_fun=self._weighting_fun,
            num_heads=self._num_heads,
            scoring_mlp_layers=[self._hidden_dim],
        )
        self._node_to_graph_representation_layer.build(
            NodesToGraphRepresentationInput(
                node_embeddings=tensor_shapes.node_embeddings,
                node_to_graph_map=tensor_shapes.node_to_graph_map,
                num_graphs=tensor_shapes.num_graphs,
            ))

        super().build(tensor_shapes)
    def compute_task_output_new(
            self,
            batch_features: Dict[str, tf.Tensor],
            final_node_representations: tf.Tensor,
            batch_features_2: Dict[str, tf.Tensor],
            final_node_representations_2: tf.Tensor,
            batch_features_3: Dict[str, tf.Tensor],
            training: bool,
    ) -> Any:
        per_graph_results_1 = self._node_to_graph_repr_layer(
            NodesToGraphRepresentationInput(
                node_embeddings=tf.concat(
                    [batch_features["node_features"], final_node_representations], axis=-1
                ),
                node_to_graph_map=batch_features["node_to_graph_map"],
                num_graphs=batch_features["num_graphs_in_batch"],
            )
        )  # Shape [G, graph_aggregation_num_heads]
        # second
        per_graph_results_2 = self._node_to_graph_repr_layer(
            NodesToGraphRepresentationInput(
                node_embeddings=tf.concat(
                    [batch_features_2["node_features"], final_node_representations_2], axis=-1
                ),
                node_to_graph_map=batch_features_2["node_to_graph_map"],
                num_graphs=batch_features_2["num_graphs_in_batch"],
            )
        )  # Shape [G, graph_aggregation_num_heads]

        # ------------------------------------------------------------------

        # Concatenate the first two representations (e.g. 10 rows x 32 dims):
        per_graph_results_all = tf.concat([per_graph_results_1, per_graph_results_2], axis=1)
        # print("first two concatenated", per_graph_results_all)
        with open("embeding.log", "w") as fd:
            fd.write(str(per_graph_results_all))

        # ------------------------------------------------------------------

        # Sample input line:
        # {"Property": 0.0, "graph": {"add":[1.25,0.36], "node_features": [[0.023, 40, 1.5590395, 0.37866586,
        # Take columns 0 and 2 of the first row as a [1, 2] tensor:
        per_graph_results_3 = tf.reshape(
            [batch_features_3["node_features"][0][0], batch_features_3["node_features"][0][2]], (1, 2)
        )
        # Column 1 holds the graph's node count; use it as the stride to hop to
        # the first row of the next graph:
        i = tf.cast(batch_features_3["node_features"][0][1], tf.int32)

        while i < batch_features_3["node_features"].shape[0]:
            per_graph_results_3 = tf.concat(
                [
                    per_graph_results_3,
                    tf.reshape(
                        [batch_features_3["node_features"][i][0], batch_features_3["node_features"][i][2]],
                        (1, 2),
                    ),
                ],
                axis=0,
            )
            i = i + tf.cast(batch_features_3["node_features"][i][1], tf.int32)

        per_graph_results_all = tf.concat([per_graph_results_all, per_graph_results_3], axis=1)
        # print("first three concatenated", per_graph_results_all)
        # with open("embeding22.log", "a") as fd:
        #     fd.write(str(per_graph_results_all))

        per_graph_results = self._graph_repr_to_classification_layer(
            per_graph_results_all
        )  # Shape [G, 1]

        return tf.squeeze(per_graph_results, axis=-1)
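
The while loop above appears to decode a packed format: each row of batch_features_3["node_features"] carries (value, graph_size, value), repeated for every node of a graph, and hopping by the stride in column 1 selects one row per graph. A plain-NumPy sketch of that decoding with toy data (the format is inferred, not documented in the source):

import numpy as np

node_features = np.array([
    [0.99, 2, 0.50],   # graph 0: 2 nodes, so this row repeats twice
    [0.99, 2, 0.50],
    [0.25, 1, 0.75],   # graph 1: 1 node
])

rows, i = [node_features[0, [0, 2]]], int(node_features[0, 1])
while i < node_features.shape[0]:
    rows.append(node_features[i, [0, 2]])
    i += int(node_features[i, 1])

print(np.stack(rows))  # [[0.99 0.5 ] [0.25 0.75]] -- one row per graph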
Example 11
    def compute_task_output_new(
        self,
        batch_features: Dict[str, tf.Tensor],
        final_node_representations: tf.Tensor,
        batch_features_2: Dict[str, tf.Tensor],
        final_node_representations_2: tf.Tensor,
        batch_features_3: Dict[str, tf.Tensor],
        training: bool,
    ) -> Any:
        per_graph_results_1 = self._node_to_graph_repr_layer(
            NodesToGraphRepresentationInput(
                node_embeddings=tf.concat(
                    [batch_features["node_features"], final_node_representations], axis=-1
                ),
                node_to_graph_map=batch_features["node_to_graph_map"],
                num_graphs=batch_features["num_graphs_in_batch"],
            )
        )  # Shape [G, graph_aggregation_num_heads]
        # second graph
        per_graph_results_2 = self._node_to_graph_repr_layer(
            NodesToGraphRepresentationInput(
                node_embeddings=tf.concat(
                    [batch_features_2["node_features"], final_node_representations_2], axis=-1
                ),
                node_to_graph_map=batch_features_2["node_to_graph_map"],
                num_graphs=batch_features_2["num_graphs_in_batch"],
            )
        )  # Shape [G, graph_aggregation_num_heads]

        # Concatenate the two per-graph representations:
        per_graph_results_all = tf.concat(
            [per_graph_results_1, per_graph_results_2], axis=1
        )
        with open("embeding.log", "w") as fd:
            fd.write(str(per_graph_results_all))

        # Take column 0 of the first row as a [1, 1] tensor; column 1 holds the
        # graph's node count, used as the stride to the next graph's first row:
        per_graph_results_3 = tf.reshape(batch_features_3["node_features"][0][0], (1, 1))
        i = tf.cast(batch_features_3["node_features"][0][1], tf.int32)
        while i < batch_features_3["node_features"].shape[0]:
            per_graph_results_3 = tf.concat(
                [
                    per_graph_results_3,
                    tf.reshape(batch_features_3["node_features"][i][0], (1, 1)),
                ],
                axis=0,
            )
            i = i + tf.cast(batch_features_3["node_features"][i][1], tf.int32)

        per_graph_results_all = tf.concat(
            [per_graph_results_all, per_graph_results_3], axis=1
        )

        per_graph_results = self._graph_repr_to_classification_layer(
            per_graph_results_all
        )  # Shape [G, 1]

        # fh change: return raw [G, 1] outputs without squeezing.
        return per_graph_results