Example #1
    def build(self, input_shapes):
        with tf.name_scope(self.__class__.__name__):
            self._node_to_graph_repr_layer = WeightedSumGraphRepresentation(
                graph_representation_size=self._params["graph_aggregation_num_heads"],
                num_heads=self._params["graph_aggregation_num_heads"],
                scoring_mlp_layers=self._params["graph_aggregation_hidden_layers"],
                scoring_mlp_dropout_rate=self._params["graph_aggregation_dropout_rate"],
                transformation_mlp_layers=self._params["graph_aggregation_hidden_layers"],
                transformation_mlp_dropout_rate=self._params["graph_aggregation_dropout_rate"],
            )
            self._node_to_graph_repr_layer.build(
                NodesToGraphRepresentationInput(
                    node_embeddings=tf.TensorShape(
                        (None, input_shapes["node_features"][-1] +
                         self._params["gnn_hidden_dim"])),
                    node_to_graph_map=tf.TensorShape((None,)),
                    num_graphs=tf.TensorShape(()),
                ))

        super().build(input_shapes)
Example #2
    def build(self, input_shapes):
        with tf.name_scope(self._name):
            self._node_to_graph_repr_layer = WeightedSumGraphRepresentation(
                graph_representation_size=self._params["graph_aggregation_num_heads"],
                num_heads=self._params["graph_aggregation_num_heads"],
                scoring_mlp_layers=self._params["graph_aggregation_hidden_layers"],
                scoring_mlp_dropout_rate=self._params["graph_aggregation_dropout_rate"],
                transformation_mlp_layers=self._params["graph_aggregation_hidden_layers"],
                transformation_mlp_dropout_rate=self._params["graph_aggregation_dropout_rate"],
            )
            self._node_to_graph_repr_layer.build(
                NodesToGraphRepresentationInput(
                    node_embeddings=tf.TensorShape(
                        (None, input_shapes["node_features"][-1] +
                         self._params["gnn_hidden_dim"])),
                    node_to_graph_map=tf.TensorShape((None,)),
                    num_graphs=tf.TensorShape(()),
                ))

            self._graph_repr_to_classification_layer = tf.keras.layers.Dense(
                units=1, activation=tf.nn.sigmoid, use_bias=True)
            self._graph_repr_to_classification_layer.build(
                tf.TensorShape(
                    (None, self._params["graph_aggregation_num_heads"])))

        super().build(input_shapes)
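A note on the TensorShape arguments above: a minimal, self-contained sketch (not from the original source) of why the node_to_graph_map shape must be written (None,) rather than (None). A bare (None) is just None, which produces an unknown-rank shape instead of a rank-1 vector shape:

import tensorflow as tf

print(tf.TensorShape(None).rank)     # None -- unknown rank
print(tf.TensorShape((None,)).rank)  # 1    -- vector of unknown length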
Example #3
    def __init__(self,
                 params: Dict[str, Any],
                 dataset: GraphDataset,
                 name: str = None,
                 **kwargs):
        super().__init__(params, dataset=dataset, name=name, **kwargs)
        self._node_to_graph_aggregation = None

        # Construct sublayers:
        self._weighted_avg_of_nodes_to_graph_repr = WeightedSumGraphRepresentation(
            graph_representation_size=self._params["graph_aggregation_output_size"],
            num_heads=self._params["graph_aggregation_num_heads"],
            weighting_fun="softmax",
            scoring_mlp_layers=self._params["graph_aggregation_layers"],
            scoring_mlp_dropout_rate=self._params["graph_aggregation_dropout_rate"],
            scoring_mlp_activation_fun="elu",
            transformation_mlp_layers=self._params["graph_aggregation_layers"],
            transformation_mlp_dropout_rate=self._params["graph_aggregation_dropout_rate"],
            transformation_mlp_activation_fun="elu",
        )
        self._weighted_sum_of_nodes_to_graph_repr = WeightedSumGraphRepresentation(
            graph_representation_size=self._params["graph_aggregation_output_size"],
            num_heads=self._params["graph_aggregation_num_heads"],
            weighting_fun="sigmoid",
            scoring_mlp_layers=self._params["graph_aggregation_layers"],
            scoring_mlp_dropout_rate=self._params["graph_aggregation_dropout_rate"],
            scoring_mlp_activation_fun="elu",
            transformation_mlp_layers=self._params["graph_aggregation_layers"],
            transformation_mlp_dropout_rate=self._params["graph_aggregation_dropout_rate"],
            transformation_mlp_activation_fun="elu",
        )

        self._regression_mlp = MLP(
            out_size=1,
            hidden_layers=self._params["regression_mlp_layers"],
            dropout_rate=self._params["regression_mlp_dropout"],
            use_biases=True,
            activation_fun=tf.nn.relu,
        )
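The two aggregation layers above are applied to the same input and concatenated before the regression MLP (Example #7 below shows the full wiring). A toy shape check, with assumed sizes, of why the MLP input width is twice the aggregation output size:

import tensorflow as tf

G, out_size = 3, 32  # assumed: batch of 3 graphs, graph_aggregation_output_size = 32
weighted_avg = tf.random.normal((G, out_size))
weighted_sum = tf.random.normal((G, out_size))
combined = tf.concat([weighted_avg, weighted_sum], axis=-1)  # shape [G, 2 * out_size]
assert combined.shape[-1] == 2 * out_size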
Example #4
    def build(self, input_shapes: Dict[str, Any]):
        graph_params = {
            name[4:]: value for name, value in self._params.items() if name.startswith("gnn_")
        }
        self.embedding = tf.keras.layers.Embedding(self.vocab_size, self._params["token_embedding_size"])
        self._gnn = GNN(graph_params)
        self._gnn.build(
            GNNInput(
                node_features=self.get_initial_node_feature_shape(input_shapes),
                adjacency_lists=tuple(
                    input_shapes[f"adjacency_list_{edge_type_idx}"]
                    for edge_type_idx in range(self._num_edge_types)
                ),
                node_to_graph_map=tf.TensorShape((None,)),
                num_graphs=tf.TensorShape(()),
            )
        )

        with tf.name_scope(self._name):
            self._node_to_graph_repr_layer = WeightedSumGraphRepresentation(
                graph_representation_size=self._params["graph_aggregation_size"],
                num_heads=self._params["graph_aggregation_num_heads"],
                scoring_mlp_layers=self._params["graph_aggregation_hidden_layers"],
                scoring_mlp_dropout_rate=self._params["graph_aggregation_dropout_rate"],
                transformation_mlp_layers=self._params["graph_aggregation_hidden_layers"],
                transformation_mlp_dropout_rate=self._params["graph_aggregation_dropout_rate"],
            )
            self._node_to_graph_repr_layer.build(
                NodesToGraphRepresentationInput(
                    node_embeddings=tf.TensorShape(
                        (None, input_shapes["node_features"][-1] + self._params["gnn_hidden_dim"])
                    ),
                    node_to_graph_map=tf.TensorShape((None,)),
                    num_graphs=tf.TensorShape(()),
                )
            )

            self._graph_repr_layer = tf.keras.layers.Dense(
                self._params["graph_encoding_size"], use_bias=True
            )
            self._graph_repr_layer.build(
                tf.TensorShape((None, self._params["graph_aggregation_size"]))
            )
        super().build([])
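A small, self-contained illustration of the "gnn_" prefix convention used at the top of build(): hyperparameters named gnn_* are routed to the inner GNN with the prefix stripped, while everything else stays at the encoder level. The parameter values here are assumed for the example:

params = {"gnn_hidden_dim": 64, "gnn_num_layers": 4, "token_embedding_size": 64}
graph_params = {name[4:]: value for name, value in params.items()
                if name.startswith("gnn_")}
assert graph_params == {"hidden_dim": 64, "num_layers": 4}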
Example #5
    def build(self, tensor_shapes: GraphGlobalExchangeInput):
        """Build the various layers in the model.

        Args:
            tensor_shapes: A GraphGlobalExchangeInput of tensor shapes.

        Returns:
            Nothing, but initialises the layers in the model based on the tensor shapes given.
        """
        self._node_to_graph_representation_layer = WeightedSumGraphRepresentation(
            graph_representation_size=self._hidden_dim,
            weighting_fun=self._weighting_fun,
            num_heads=self._num_heads,
            scoring_mlp_layers=[self._hidden_dim],
        )
        self._node_to_graph_representation_layer.build(
            NodesToGraphRepresentationInput(
                node_embeddings=tensor_shapes.node_embeddings,
                node_to_graph_map=tensor_shapes.node_to_graph_map,
                num_graphs=tensor_shapes.num_graphs,
            ))

        super().build(tensor_shapes)
Example #6
class GraphBinaryClassificationTask(GraphTaskModel):
    @classmethod
    def get_default_hyperparameters(cls,
                                    mp_style: Optional[str] = None
                                    ) -> Dict[str, Any]:
        super_params = super().get_default_hyperparameters(mp_style)
        these_hypers: Dict[str, Any] = {
            "graph_aggregation_num_heads": 16,
            "graph_aggregation_hidden_layers": [128],
            "graph_aggregation_dropout_rate": 0.2,
        }
        super_params.update(these_hypers)
        return super_params

    def __init__(self,
                 params: Dict[str, Any],
                 dataset: GraphDataset,
                 name: str = None):
        super().__init__(params, dataset=dataset, name=name)
        self._node_to_graph_aggregation = None

    def build(self, input_shapes):
        with tf.name_scope(self._name):
            self._node_to_graph_repr_layer = WeightedSumGraphRepresentation(
                graph_representation_size=self._params["graph_aggregation_num_heads"],
                num_heads=self._params["graph_aggregation_num_heads"],
                scoring_mlp_layers=self._params["graph_aggregation_hidden_layers"],
                scoring_mlp_dropout_rate=self._params["graph_aggregation_dropout_rate"],
                transformation_mlp_layers=self._params["graph_aggregation_hidden_layers"],
                transformation_mlp_dropout_rate=self._params["graph_aggregation_dropout_rate"],
            )
            self._node_to_graph_repr_layer.build(
                NodesToGraphRepresentationInput(
                    node_embeddings=tf.TensorShape(
                        (None, input_shapes["node_features"][-1] +
                         self._params["gnn_hidden_dim"])),
                    node_to_graph_map=tf.TensorShape((None,)),
                    num_graphs=tf.TensorShape(()),
                ))

            self._graph_repr_to_classification_layer = tf.keras.layers.Dense(
                units=1, activation=tf.nn.sigmoid, use_bias=True)
            self._graph_repr_to_classification_layer.build(
                tf.TensorShape(
                    (None, self._params["graph_aggregation_num_heads"])))

        super().build(input_shapes)

    def compute_task_output(
        self,
        batch_features: Dict[str, tf.Tensor],
        final_node_representations: tf.Tensor,
        training: bool,
    ) -> Any:
        per_graph_results = self._node_to_graph_repr_layer(
            NodesToGraphRepresentationInput(
                node_embeddings=tf.concat(
                    [batch_features["node_features"], final_node_representations],
                    axis=-1),
                node_to_graph_map=batch_features["node_to_graph_map"],
                num_graphs=batch_features["num_graphs_in_batch"],
            ))  # Shape [G, graph_aggregation_num_heads]
        per_graph_results = self._graph_repr_to_classification_layer(
            per_graph_results)  # Shape [G, 1]

        return tf.squeeze(per_graph_results, axis=-1)

    def compute_task_metrics(
        self,
        batch_features: Dict[str, tf.Tensor],
        task_output: Any,
        batch_labels: Dict[str, tf.Tensor],
    ) -> Dict[str, tf.Tensor]:
        ce = tf.reduce_mean(
            tf.keras.losses.binary_crossentropy(
                y_true=batch_labels["target_value"],
                y_pred=task_output,
                from_logits=False))
        num_correct = tf.reduce_sum(
            tf.cast(
                tf.math.equal(batch_labels["target_value"],
                              tf.math.round(task_output)), tf.int32))
        num_graphs = tf.cast(batch_features["num_graphs_in_batch"], tf.float32)
        return {
            "loss": ce,
            "batch_acc": tf.cast(num_correct, tf.float32) / num_graphs,
            "num_correct": num_correct,
            "num_graphs": num_graphs,
        }

    def compute_epoch_metrics(self,
                              task_results: List[Any]) -> Tuple[float, str]:
        total_num_graphs = np.sum([batch_task_result["num_graphs"]
                                   for batch_task_result in task_results])
        total_num_correct = np.sum([batch_task_result["num_correct"]
                                    for batch_task_result in task_results])
        epoch_acc = tf.cast(total_num_correct, tf.float32) / total_num_graphs
        return -epoch_acc.numpy(), f"Accuracy = {epoch_acc.numpy():.3f}"
Example #7
class GraphRegressionTask(GraphTaskModel):
    @classmethod
    def get_default_hyperparameters(cls,
                                    mp_style: Optional[str] = None
                                    ) -> Dict[str, Any]:
        super_params = super().get_default_hyperparameters(mp_style)
        these_hypers: Dict[str, Any] = {
            "use_intermediate_gnn_results": True,
            "graph_aggregation_output_size": 32,
            "graph_aggregation_num_heads": 4,
            "graph_aggregation_layers": [32, 32],
            "graph_aggregation_dropout_rate": 0.1,
            "regression_mlp_layers": [64, 32],
            "regression_mlp_dropout": 0.1,
        }
        super_params.update(these_hypers)
        return super_params

    def __init__(self,
                 params: Dict[str, Any],
                 dataset: GraphDataset,
                 name: str = None,
                 **kwargs):
        super().__init__(params, dataset=dataset, name=name, **kwargs)
        self._node_to_graph_aggregation = None

        # Construct sublayers:
        self._weighted_avg_of_nodes_to_graph_repr = WeightedSumGraphRepresentation(
            graph_representation_size=self._params["graph_aggregation_output_size"],
            num_heads=self._params["graph_aggregation_num_heads"],
            weighting_fun="softmax",
            scoring_mlp_layers=self._params["graph_aggregation_layers"],
            scoring_mlp_dropout_rate=self._params["graph_aggregation_dropout_rate"],
            scoring_mlp_activation_fun="elu",
            transformation_mlp_layers=self._params["graph_aggregation_layers"],
            transformation_mlp_dropout_rate=self._params["graph_aggregation_dropout_rate"],
            transformation_mlp_activation_fun="elu",
        )
        self._weighted_sum_of_nodes_to_graph_repr = WeightedSumGraphRepresentation(
            graph_representation_size=self._params["graph_aggregation_output_size"],
            num_heads=self._params["graph_aggregation_num_heads"],
            weighting_fun="sigmoid",
            scoring_mlp_layers=self._params["graph_aggregation_layers"],
            scoring_mlp_dropout_rate=self._params["graph_aggregation_dropout_rate"],
            scoring_mlp_activation_fun="elu",
            transformation_mlp_layers=self._params["graph_aggregation_layers"],
            transformation_mlp_dropout_rate=self._params["graph_aggregation_dropout_rate"],
            transformation_mlp_activation_fun="elu",
        )

        self._regression_mlp = MLP(
            out_size=1,
            hidden_layers=self._params["regression_mlp_layers"],
            dropout_rate=self._params["regression_mlp_dropout"],
            use_biases=True,
            activation_fun=tf.nn.relu,
        )

    def build(self, input_shapes):
        if self._params["use_intermediate_gnn_results"]:
            # We get the initial GNN input + results for all layers:
            node_repr_size = (input_shapes["node_features"][-1] +
                              self._params["gnn_hidden_dim"] *
                              self._params["gnn_num_layers"])
        else:
            node_repr_size = (input_shapes["node_features"][-1] +
                              self._params["gnn_hidden_dim"])

        node_to_graph_repr_input = NodesToGraphRepresentationInput(
            node_embeddings=tf.TensorShape((None, node_repr_size)),
            node_to_graph_map=tf.TensorShape((None,)),
            num_graphs=tf.TensorShape(()),
        )

        with tf.name_scope(self.__class__.__name__):
            with tf.name_scope("graph_representation_computation"):
                with tf.name_scope("weighted_avg"):
                    self._weighted_avg_of_nodes_to_graph_repr.build(
                        node_to_graph_repr_input)
                with tf.name_scope("weighted_sum"):
                    self._weighted_sum_of_nodes_to_graph_repr.build(
                        node_to_graph_repr_input)

            self._regression_mlp.build(
                tf.TensorShape(
                    (None, 2 * self._params["graph_aggregation_output_size"])))

        super().build(input_shapes)

    def compute_task_output(
        self,
        batch_features: Dict[str, tf.Tensor],
        final_node_representations: Union[tf.Tensor, Tuple[tf.Tensor,
                                                           List[tf.Tensor]]],
        training: bool,
    ) -> Any:
        if self._params["use_intermediate_gnn_results"]:
            _, intermediate_node_representations = final_node_representations
            # We want to skip the first "intermediate" representation, which is the output of
            # the initial feature -> GNN input layer:
            node_representations = tf.concat(
                [batch_features["node_features"],
                 *intermediate_node_representations[1:]],
                axis=-1,
            )
        else:
            node_representations = tf.concat(
                [batch_features["node_features"], final_node_representations],
                axis=-1)

        graph_representation_layer_input = NodesToGraphRepresentationInput(
            node_embeddings=node_representations,
            node_to_graph_map=batch_features["node_to_graph_map"],
            num_graphs=batch_features["num_graphs_in_batch"],
        )
        weighted_avg_graph_repr = self._weighted_avg_of_nodes_to_graph_repr(
            graph_representation_layer_input, training=training)
        weighted_sum_graph_repr = self._weighted_sum_of_nodes_to_graph_repr(
            graph_representation_layer_input, training=training)

        graph_representations = tf.concat(
            [weighted_avg_graph_repr, weighted_sum_graph_repr],
            axis=-1)  # shape: [G, GD]

        per_graph_results = self._regression_mlp(
            graph_representations, training=training)  # shape: [G, 1]

        return tf.squeeze(per_graph_results, axis=-1)

    def compute_task_metrics(
        self,
        batch_features: Dict[str, tf.Tensor],
        task_output: Any,
        batch_labels: Dict[str, tf.Tensor],
    ) -> Dict[str, tf.Tensor]:
        mse = tf.losses.mean_squared_error(batch_labels["target_value"],
                                           task_output)
        mae = tf.losses.mean_absolute_error(batch_labels["target_value"],
                                            task_output)
        num_graphs = tf.cast(batch_features["num_graphs_in_batch"], tf.float32)
        return {
            "loss": mse,
            "batch_squared_error": mse * num_graphs,
            "batch_absolute_error": mae * num_graphs,
            "num_graphs": num_graphs,
        }

    def compute_epoch_metrics(self,
                              task_results: List[Any]) -> Tuple[float, str]:
        total_num_graphs = sum(batch_task_result["num_graphs"]
                               for batch_task_result in task_results)
        total_absolute_error = sum(batch_task_result["batch_absolute_error"]
                                   for batch_task_result in task_results)
        total_squared_error = sum(batch_task_result["batch_squared_error"]
                                  for batch_task_result in task_results)
        epoch_mse = (total_squared_error / total_num_graphs).numpy()
        epoch_mae = (total_absolute_error / total_num_graphs).numpy()
        return epoch_mae, f" MSE = {epoch_mse:.3f} | MAE = {epoch_mae:.3f}"

    def evaluate_model(self, dataset: tf.data.Dataset) -> Dict[str, float]:
        import sklearn.metrics as metrics

        predictions = self.predict(dataset).numpy()
        labels = []
        for _, batch_labels in dataset:
            labels.append(batch_labels["target_value"])
        labels = tf.concat(labels, axis=0).numpy()

        results = dict(
            mae=metrics.mean_absolute_error(y_true=labels, y_pred=predictions),
            mse=metrics.mean_squared_error(y_true=labels, y_pred=predictions),
            max_err=metrics.max_error(y_true=labels, y_pred=predictions),
            expl_var=metrics.explained_variance_score(y_true=labels,
                                                      y_pred=predictions),
            r2_score=metrics.r2_score(y_true=labels, y_pred=predictions),
        )

        return results
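A worked example (dimension values assumed) of the node_repr_size arithmetic in build(): with use_intermediate_gnn_results enabled, the node embedding stacks the raw features plus every GNN layer's output; otherwise only the final output is appended:

feature_dim, gnn_hidden_dim, gnn_num_layers = 16, 64, 4
with_intermediate = feature_dim + gnn_hidden_dim * gnn_num_layers  # 16 + 256 = 272
without_intermediate = feature_dim + gnn_hidden_dim                # 16 + 64 = 80
print(with_intermediate, without_intermediate)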
Example #8
class GraphEncoder(tf.keras.Model):
    @classmethod
    def get_default_hyperparameters(cls, mp_style: Optional[str] = None) -> Dict[str, Any]:
        """Get the default hyperparameter dictionary for the class."""
        params = {f"gnn_{name}": value for name, value in GNN.get_default_hyperparameters(mp_style).items()}
        these_hypers: Dict[str, Any] = {
            "graph_aggregation_size": 256,
            "graph_aggregation_num_heads": 16,
            "graph_aggregation_hidden_layers": [128],
            "graph_aggregation_dropout_rate": 0.2,
            "token_embedding_size":  64,
            "gnn_message_calculation_class": "gnn_edge_mlp",
            "gnn_hidden_dim": 64,
            "gnn_global_exchange_mode": "mlp",
            "gnn_global_exchange_every_num_layers": 10,
            "gnn_num_layers": 4,
            "graph_encoding_size": 256,
        }
        params.update(these_hypers)
        return params

    def __init__(self, params: Dict[str, Any], vocab_size, name: str = None):
        super().__init__(name=name)
        self._params = params
        self._num_edge_types = 1
        self._token_embedding_size = params["token_embedding_size"]
        self.vocab_size = vocab_size

    def build(self, input_shapes: Dict[str, Any]):
        graph_params = {
            name[4:]: value for name, value in self._params.items() if name.startswith("gnn_")
        }
        self.embedding = tf.keras.layers.Embedding(self.vocab_size, self._params["token_embedding_size"])
        self._gnn = GNN(graph_params)
        self._gnn.build(
            GNNInput(
                node_features=self.get_initial_node_feature_shape(input_shapes),
                adjacency_lists=tuple(
                    input_shapes[f"adjacency_list_{edge_type_idx}"]
                    for edge_type_idx in range(self._num_edge_types)
                ),
                node_to_graph_map=tf.TensorShape((None,)),
                num_graphs=tf.TensorShape(()),
            )
        )

        with tf.name_scope(self._name):
            self._node_to_graph_repr_layer = WeightedSumGraphRepresentation(
                graph_representation_size=self._params["graph_aggregation_size"],
                num_heads=self._params["graph_aggregation_num_heads"],
                scoring_mlp_layers=self._params["graph_aggregation_hidden_layers"],
                scoring_mlp_dropout_rate=self._params["graph_aggregation_dropout_rate"],
                transformation_mlp_layers=self._params["graph_aggregation_hidden_layers"],
                transformation_mlp_dropout_rate=self._params["graph_aggregation_dropout_rate"],
            )
            self._node_to_graph_repr_layer.build(
                NodesToGraphRepresentationInput(
                    node_embeddings=tf.TensorShape(
                        (None, input_shapes["node_features"][-1] + self._params["gnn_hidden_dim"])
                    ),
                    node_to_graph_map=tf.TensorShape((None,)),
                    num_graphs=tf.TensorShape(()),
                )
            )

            self._graph_repr_layer = tf.keras.layers.Dense(
                self._params["graph_encoding_size"], use_bias=True
            )
            self._graph_repr_layer.build(
                tf.TensorShape((None, self._params["graph_aggregation_size"]))
            )
        super().build([])

    def get_initial_node_feature_shape(self, input_shapes) -> tf.TensorShape:
        return tf.TensorShape((None, self._token_embedding_size))

    def compute_initial_node_features(self, inputs, training: bool) -> tf.Tensor:
        return tf.squeeze(self.embedding(inputs["node_features"]))

    def compute_task_output(
        self,
        batch_features: Dict[str, tf.Tensor],
        final_node_representations: tf.Tensor,
        training: bool,
    ) -> Any:
        per_graph_results = self._node_to_graph_repr_layer(
            NodesToGraphRepresentationInput(
                node_embeddings=tf.concat(
                    [batch_features["node_features"], final_node_representations], axis=-1
                ),
                node_to_graph_map=batch_features["node_to_graph_map"],
                num_graphs=batch_features["num_graphs_in_batch"],
            )
        )  # Shape [G, graph_aggregation_num_heads]
        per_graph_results = self._graph_repr_layer(
            per_graph_results
        )  # Shape [G, graph_encoding_size]

        return per_graph_results

    def call(self, inputs, training: bool, seq_enc_output=None):
        # Pack input data from keys back into a tuple:
        adjacency_lists: Tuple[tf.Tensor, ...] = tuple(
            inputs[f"adjacency_list_{edge_type_idx}"]
            for edge_type_idx in range(self._num_edge_types)
        )

        # Start the model computations:
        initial_node_features = self.compute_initial_node_features(inputs, training)
        if tf.is_tensor(seq_enc_output):
            node_features = tf.split(initial_node_features, inputs["graph_to_num_nodes"])
            n_tokens = seq_enc_output.shape[1]
            node_features = [
                tf.concat(
                    [source_features[1:source_len + 1],
                     graph_features[min(source_len, n_tokens - 1):, :]],
                    axis=0,
                )
                for graph_features, source_features, source_len in zip(
                    node_features, seq_enc_output, inputs["source_len"])
            ]
            initial_node_features = tf.concat(node_features, axis=0)

        gnn_input = GNNInput(
            node_features=initial_node_features,
            adjacency_lists=adjacency_lists,
            node_to_graph_map=inputs["node_to_graph_map"],
            num_graphs=inputs["num_graphs_in_batch"],
        )
        final_node_representations = self._gnn(gnn_input, training)
        return self.compute_task_output(inputs, final_node_representations, training), final_node_representations
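A hedged usage sketch for GraphEncoder; the vocabulary size and the shape of the input batch are assumptions, not taken from the original source:

params = GraphEncoder.get_default_hyperparameters()
encoder = GraphEncoder(params, vocab_size=10000)  # vocab_size is an assumption
# Calling encoder(inputs, training=True) returns a pair:
#   (per-graph encodings of shape [G, graph_encoding_size],
#    final node representations from the GNN).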
Example #9
class GraphBinaryClassificationTask(GraphTaskModel):
    @classmethod
    def get_default_hyperparameters(cls, mp_style: Optional[str] = None) -> Dict[str, Any]:
        super_params = super().get_default_hyperparameters(mp_style)
        these_hypers: Dict[str, Any] = {
            "graph_aggregation_num_heads": 16,
            "graph_aggregation_hidden_layers": [128],
            "graph_aggregation_dropout_rate": 0.2,
        }
        super_params.update(these_hypers)
        return super_params

    def __init__(self, params: Dict[str, Any], dataset: GraphDataset, name: str = None):
        super().__init__(params, dataset=dataset, name=name)
        self._node_to_graph_aggregation = None

    def build(self, input_shapes):
        with tf.name_scope(self._name):
            self._node_to_graph_repr_layer = WeightedSumGraphRepresentation(
                graph_representation_size=self._params["graph_aggregation_num_heads"],
                num_heads=self._params["graph_aggregation_num_heads"],
                scoring_mlp_layers=self._params["graph_aggregation_hidden_layers"],
                scoring_mlp_dropout_rate=self._params["graph_aggregation_dropout_rate"],
                transformation_mlp_layers=self._params["graph_aggregation_hidden_layers"],
                transformation_mlp_dropout_rate=self._params["graph_aggregation_dropout_rate"],
            )
            self._node_to_graph_repr_layer.build(
                NodesToGraphRepresentationInput(
                    node_embeddings=tf.TensorShape(
                        (None, input_shapes["node_features"][-1] + self._params["gnn_hidden_dim"])
                    ),
                    node_to_graph_map=tf.TensorShape((None,)),
                    num_graphs=tf.TensorShape(()),
                )
            )

            self._graph_repr_to_classification_layer = tf.keras.layers.Dense(
                units=1, activation=tf.nn.sigmoid, use_bias=True
            )
            # NOTE: adjust the input width below (num_heads * 2 + 2 here) to match your setup.
            self._graph_repr_to_classification_layer.build(
                tf.TensorShape((None, self._params["graph_aggregation_num_heads"] * 2 + 2))
            )
        super().build(input_shapes)

    def compute_task_output(
            self,
            batch_features: Dict[str, tf.Tensor],
            final_node_representations: tf.Tensor,
            training: bool,
    ) -> Any:
        per_graph_results = self._node_to_graph_repr_layer(
            NodesToGraphRepresentationInput(
                node_embeddings=tf.concat(
                    [batch_features["node_features"], final_node_representations], axis=-1
                ),
                node_to_graph_map=batch_features["node_to_graph_map"],
                num_graphs=batch_features["num_graphs_in_batch"],
            )
        )  # Shape [G, graph_aggregation_num_heads]
        per_graph_results = self._graph_repr_to_classification_layer(
            per_graph_results
        )  # Shape [G, 1]

        return tf.squeeze(per_graph_results, axis=-1)

    # New compute method combining two graph batches and extra per-graph features:
    def compute_task_output_new(
            self,
            batch_features: Dict[str, tf.Tensor],
            final_node_representations: tf.Tensor,
            batch_features_2: Dict[str, tf.Tensor],
            final_node_representations_2: tf.Tensor,
            batch_features_3: Dict[str, tf.Tensor],
            training: bool,
    ) -> Any:
        per_graph_results_1 = self._node_to_graph_repr_layer(
            NodesToGraphRepresentationInput(
                node_embeddings=tf.concat(
                    [batch_features["node_features"], final_node_representations], axis=-1
                ),
                node_to_graph_map=batch_features["node_to_graph_map"],
                num_graphs=batch_features["num_graphs_in_batch"],
            )
        )  # Shape [G, graph_aggregation_num_heads]
        # second
        per_graph_results_2 = self._node_to_graph_repr_layer(
            NodesToGraphRepresentationInput(
                node_embeddings=tf.concat(
                    [batch_features_2["node_features"], final_node_representations_2], axis=-1
                ),
                node_to_graph_map=batch_features_2["node_to_graph_map"],
                num_graphs=batch_features_2["num_graphs_in_batch"],
            )
        )  # Shape [G, graph_aggregation_num_heads]

        # ------------------------------------------------------------------

        per_graph_results_all = tf.concat([per_graph_results_1, per_graph_results_2], axis=1)  # concatenate the first two parts: 10 rows x 32 dims
        # print("first two concatenated:", per_graph_results_all)
        with open("embeding.log", "w") as fd:
            fd.write(str(per_graph_results_all))

        # ------------------------------------------------------------------

        # {"Property": 0.0, "graph": {"add":[1.25,0.36], "node_features": [[0.023, 40, 1.5590395, 0.37866586,
        per_graph_results_3 = tf.reshape([batch_features_3["node_features"][0][0],batch_features_3["node_features"][0][2]], (1, 2))
        #设置一行两列格式 tf.Tensor([[0.99]], shape=(1, 1), dtype=float32)
        i = tf.cast(batch_features_3["node_features"][0][1],tf.int32)

        #tf.Tensor([[0.505]], shape=(1, 1), dtype=float32)
        while(i<batch_features_3["node_features"].shape[0]):
            per_graph_results_3 = tf.concat(
                [per_graph_results_3, tf.reshape([batch_features_3["node_features"][i][0],batch_features_3["node_features"][i][2]], (1, 2))], axis=0)
            i = i + tf.cast(batch_features_3["node_features"][i][1],tf.int32)



        per_graph_results_all = tf.concat([per_graph_results_all, per_graph_results_3], axis=1)
        # print("拼接前三个", per_graph_results_all)
#         with open("embeding22.log", "a") as fd:
#             fd.write(str(per_graph_results_all))

        per_graph_results = self._graph_repr_to_classification_layer(
            per_graph_results_all
        )  # Shape [G, 1]

        return tf.squeeze(per_graph_results, axis=-1)

    def compute_task_metrics(
            self,
            batch_features: Dict[str, tf.Tensor],
            task_output: Any,
            batch_labels: Dict[str, tf.Tensor],
    ) -> Dict[str, tf.Tensor]:
        ce = tf.reduce_mean(
            tf.keras.losses.binary_crossentropy(
                y_true=batch_labels["target_value"], y_pred=task_output, from_logits=False
            )
        )
        # Round predictions for inspection (added by zjq):
        a = tf.math.round(task_output)
        print(a)
        # with open("log_result.txt", "a") as fd:
        #     fd.write(str(a))

        num_correct = tf.reduce_sum(
            tf.cast(
                tf.math.equal(batch_labels["target_value"], tf.math.round(task_output)), tf.int32
            )
        )
        num_graphs = tf.cast(batch_features["num_graphs_in_batch"], tf.float32)
        return {
            "loss": ce,
            "batch_acc": tf.cast(num_correct, tf.float32) / num_graphs,
            "num_correct": num_correct,
            "num_graphs": num_graphs,
        }

    def compute_epoch_metrics(self, task_results: List[Any]) -> Tuple[float, str]:
        total_num_graphs = np.sum(
            [batch_task_result["num_graphs"] for batch_task_result in task_results]
        )
        total_num_correct = np.sum(
            [batch_task_result["num_correct"] for batch_task_result in task_results]
        )
        epoch_acc = tf.cast(total_num_correct, tf.float32) / total_num_graphs
        return -epoch_acc.numpy(), f"Accuracy = {epoch_acc.numpy():.3f}"
Example #10
class GraphGlobalExchange(tf.keras.layers.Layer):
    """Update node representations based on graph-global information."""
    def __init__(
        self,
        hidden_dim: int,
        weighting_fun: str = "softmax",
        num_heads: int = 4,
        dropout_rate: float = 0.0,
    ):
        """Initialise the layer."""
        super().__init__()
        self._hidden_dim = hidden_dim
        self._weighting_fun = weighting_fun
        self._num_heads = num_heads
        self._dropout_rate = dropout_rate

    def build(self, tensor_shapes: GraphGlobalExchangeInput):
        """Build the various layers in the model.

        Args:
            tensor_shapes: A GraphGlobalExchangeInput of tensor shapes.

        Returns:
            Nothing, but initialises the layers in the model based on the tensor shapes given.
        """
        self._node_to_graph_representation_layer = WeightedSumGraphRepresentation(
            graph_representation_size=self._hidden_dim,
            weighting_fun=self._weighting_fun,
            num_heads=self._num_heads,
            scoring_mlp_layers=[self._hidden_dim],
        )
        self._node_to_graph_representation_layer.build(
            NodesToGraphRepresentationInput(
                node_embeddings=tensor_shapes.node_embeddings,
                node_to_graph_map=tensor_shapes.node_to_graph_map,
                num_graphs=tensor_shapes.num_graphs,
            ))

        super().build(tensor_shapes)

    @abstractmethod
    def call(self, inputs: GraphGlobalExchangeInput, training: bool = False):
        """
        Args:
            inputs: A GraphGlobalExchangeInput containing the following fields:
                node_embeddings: float32 tensor of shape [V, D], the original representation
                    of each node in the graph.
                node_to_graph_map: int32 tensor of shape [V], where node_to_graph_map[v] = i
                    means that node v belongs to graph i in the batch.
                num_graphs: int32 tensor of shape [], specifying the number of graphs in the batch.

            training: A bool representing whether the model is training or evaluating.

        Returns:
            A tensor of shape [V, hidden_dim]. The tensor represents the encoding of the
            states updated with information from the entire graph.
        """
        pass

    def _compute_per_node_graph_representations(
            self, inputs: GraphGlobalExchangeInput, training: bool = False):
        cur_graph_representations = self._node_to_graph_representation_layer(
            NodesToGraphRepresentationInput(
                node_embeddings=inputs.node_embeddings,
                node_to_graph_map=inputs.node_to_graph_map,
                num_graphs=inputs.num_graphs,
            ),
            training=training,
        )  # Shape [G, hidden_dim]

        per_node_graph_representations = gather_dense_gradient(
            cur_graph_representations,
            inputs.node_to_graph_map)  # Shape [V, hidden_dim]

        if training:
            per_node_graph_representations = tf.nn.dropout(
                per_node_graph_representations, rate=self._dropout_rate)

        return per_node_graph_representations
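The gather at the end broadcasts each graph's representation back to its nodes; assuming gather_dense_gradient behaves like tf.gather (with a dense rather than sparse gradient), a minimal illustration:

import tensorflow as tf

graph_reprs = tf.constant([[1.0, 1.0], [2.0, 2.0]])   # [G=2, hidden_dim=2]
node_to_graph_map = tf.constant([0, 0, 1])            # [V=3]
per_node = tf.gather(graph_reprs, node_to_graph_map)  # [V, hidden_dim]
print(per_node.numpy())  # [[1. 1.] [1. 1.] [2. 2.]]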
Example #11
class GraphRegressionTask(GraphTaskModel):
    @classmethod
    def get_default_hyperparameters(cls,
                                    mp_style: Optional[str] = None
                                    ) -> Dict[str, Any]:
        super_params = super().get_default_hyperparameters(mp_style)
        these_hypers: Dict[str, Any] = {
            "graph_aggregation_num_heads": 4,
            "graph_aggregation_hidden_layers": [128],
            "graph_aggregation_dropout_rate": 0.2,
        }
        super_params.update(these_hypers)
        return super_params

    def __init__(self,
                 params: Dict[str, Any],
                 dataset: GraphDataset,
                 name: str = None):
        super().__init__(params, dataset=dataset, name=name)
        self._node_to_graph_aggregation = None

    def build(self, input_shapes):
        with tf.name_scope(self._name):
            self._node_to_graph_repr_layer = WeightedSumGraphRepresentation(
                graph_representation_size=self._params["graph_aggregation_num_heads"],
                num_heads=self._params["graph_aggregation_num_heads"],
                scoring_mlp_layers=self._params["graph_aggregation_hidden_layers"],
                scoring_mlp_dropout_rate=self._params["graph_aggregation_dropout_rate"],
                transformation_mlp_layers=self._params["graph_aggregation_hidden_layers"],
                transformation_mlp_dropout_rate=self._params["graph_aggregation_dropout_rate"],
            )
            self._node_to_graph_repr_layer.build(
                NodesToGraphRepresentationInput(
                    node_embeddings=tf.TensorShape(
                        (None, input_shapes["node_features"][-1] +
                         self._params["gnn_hidden_dim"])),
                    node_to_graph_map=tf.TensorShape((None,)),
                    num_graphs=tf.TensorShape(()),
                ))

        super().build(input_shapes)

    def compute_task_output(
        self,
        batch_features: Dict[str, tf.Tensor],
        final_node_representations: tf.Tensor,
        training: bool,
    ) -> Any:
        per_graph_results = self._node_to_graph_repr_layer(
            NodesToGraphRepresentationInput(
                node_embeddings=tf.concat(
                    [batch_features["node_features"], final_node_representations],
                    axis=-1),
                node_to_graph_map=batch_features["node_to_graph_map"],
                num_graphs=batch_features["num_graphs_in_batch"],
            ))  # Shape [G, graph_aggregation_num_heads]
        per_graph_results = tf.reduce_sum(per_graph_results, axis=-1)  # Shape [G]

        return per_graph_results

    def compute_task_metrics(
        self,
        batch_features: Dict[str, tf.Tensor],
        task_output: Any,
        batch_labels: Dict[str, tf.Tensor],
    ) -> Dict[str, tf.Tensor]:
        mse = tf.losses.mean_squared_error(batch_labels["target_value"],
                                           task_output)
        mae = tf.losses.mean_absolute_error(batch_labels["target_value"],
                                            task_output)
        num_graphs = tf.cast(batch_features["num_graphs_in_batch"], tf.float32)
        return {
            "loss": mse,
            "batch_squared_error": mse * num_graphs,
            "batch_absolute_error": mae * num_graphs,
            "num_graphs": num_graphs,
        }

    def compute_epoch_metrics(self,
                              task_results: List[Any]) -> Tuple[float, str]:
        total_num_graphs = sum(batch_task_result["num_graphs"]
                               for batch_task_result in task_results)
        total_absolute_error = sum(batch_task_result["batch_absolute_error"]
                                   for batch_task_result in task_results)
        epoch_mae = total_absolute_error / total_num_graphs
        return epoch_mae.numpy(), f"Mean Absolute Error = {epoch_mae.numpy():.3f}"
Example #12
class GraphBinaryClassificationTask(GraphTaskModel):
    @classmethod
    def get_default_hyperparameters(cls,
                                    mp_style: Optional[str] = None
                                    ) -> Dict[str, Any]:
        super_params = super().get_default_hyperparameters(mp_style)
        these_hypers: Dict[str, Any] = {
            "graph_aggregation_num_heads": 16,
            "graph_aggregation_hidden_layers": [128],
            "graph_aggregation_dropout_rate": 0.2,
        }
        super_params.update(these_hypers)
        return super_params

    def __init__(self,
                 params: Dict[str, Any],
                 dataset: GraphDataset,
                 name: str = None):
        super().__init__(params, dataset=dataset, name=name)
        self._node_to_graph_aggregation = None

    def build(self, input_shapes):
        with tf.name_scope(self._name):
            self._node_to_graph_repr_layer = WeightedSumGraphRepresentation(
                graph_representation_size=self._params["graph_aggregation_num_heads"],
                num_heads=self._params["graph_aggregation_num_heads"],
                scoring_mlp_layers=self._params["graph_aggregation_hidden_layers"],
                scoring_mlp_dropout_rate=self._params["graph_aggregation_dropout_rate"],
                transformation_mlp_layers=self._params["graph_aggregation_hidden_layers"],
                transformation_mlp_dropout_rate=self._params["graph_aggregation_dropout_rate"],
            )
            self._node_to_graph_repr_layer.build(
                NodesToGraphRepresentationInput(
                    node_embeddings=tf.TensorShape(
                        (None, input_shapes["node_features"][-1] +
                         self._params["gnn_hidden_dim"])),
                    node_to_graph_map=tf.TensorShape((None,)),
                    num_graphs=tf.TensorShape(()),
                ))
            # fh change: the original 1-unit binary head was replaced with a
            # 5-unit head over concatenated graph representations:
            # self._graph_repr_to_classification_layer = tf.keras.layers.Dense(
            #     units=1, activation=tf.nn.sigmoid, use_bias=True
            # )

            self._graph_repr_to_classification_layer = tf.keras.layers.Dense(
                units=5, activation=tf.nn.sigmoid, use_bias=True)
            # NOTE: adjust the input width below (num_heads * 2 + 1 here) to match your setup.
            self._graph_repr_to_classification_layer.build(
                tf.TensorShape(
                    (None,
                     self._params["graph_aggregation_num_heads"] * 2 + 1)))
        super().build(input_shapes)

    def compute_task_output(
        self,
        batch_features: Dict[str, tf.Tensor],
        final_node_representations: tf.Tensor,
        training: bool,
    ) -> Any:
        per_graph_results = self._node_to_graph_repr_layer(
            NodesToGraphRepresentationInput(
                node_embeddings=tf.concat(
                    [batch_features["node_features"], final_node_representations],
                    axis=-1),
                node_to_graph_map=batch_features["node_to_graph_map"],
                num_graphs=batch_features["num_graphs_in_batch"],
            ))  # Shape [G, graph_aggregation_num_heads]
        per_graph_results = self._graph_repr_to_classification_layer(
            per_graph_results)  # Shape [G, 5] with the 5-unit head above
        # print("ptwo:", per_graph_results)
        # NOTE: squeezing only applies to a 1-unit head; the multi-class path
        # uses compute_task_output_new below.
        return tf.squeeze(per_graph_results, axis=-1)

    # New compute method combining two graph batches and extra per-graph features:
    def compute_task_output_new(
        self,
        batch_features: Dict[str, tf.Tensor],
        final_node_representations: tf.Tensor,
        batch_features_2: Dict[str, tf.Tensor],
        final_node_representations_2: tf.Tensor,
        batch_features_3: Dict[str, tf.Tensor],
        training: bool,
    ) -> Any:
        per_graph_results_1 = self._node_to_graph_repr_layer(
            NodesToGraphRepresentationInput(
                node_embeddings=tf.concat(
                    [batch_features["node_features"], final_node_representations],
                    axis=-1),
                node_to_graph_map=batch_features["node_to_graph_map"],
                num_graphs=batch_features["num_graphs_in_batch"],
            ))  # Shape [G, graph_aggregation_num_heads]
        # second
        per_graph_results_2 = self._node_to_graph_repr_layer(
            NodesToGraphRepresentationInput(
                node_embeddings=tf.concat(
                    [batch_features_2["node_features"],
                     final_node_representations_2],
                    axis=-1),
                node_to_graph_map=batch_features_2["node_to_graph_map"],
                num_graphs=batch_features_2["num_graphs_in_batch"],
            ))  # Shape [G, graph_aggregation_num_heads]
        # print(per_graph_results_2.shape[0])
        # concat
        per_graph_results_all = tf.concat(
            [per_graph_results_1, per_graph_results_2], axis=1)
        # print("ptwo:", per_graph_results_all)
        with open("embeding.log", "w") as fd:
            fd.write(str(per_graph_results_all))
        per_graph_results_3 = tf.reshape(
            batch_features_3["node_features"][0][0], (1, 1))
        i = tf.cast(batch_features_3["node_features"][0][1], tf.int32)
        while i < batch_features_3["node_features"].shape[0]:
            per_graph_results_3 = tf.concat(
                [per_graph_results_3,
                 tf.reshape(batch_features_3["node_features"][i][0], (1, 1))],
                axis=0)
            i = i + tf.cast(batch_features_3["node_features"][i][1], tf.int32)

        # read_new_inputs -- alternative row-by-row construction:
        # per_graph_results_3 = tf.reshape(batch_features_3["node_features"][0][0], (1, 1))
        # for i in range(1, per_graph_results_2.shape[0]):
        #     per_graph_results_3 = tf.concat(
        #         [per_graph_results_3,
        #          tf.reshape(batch_features_3["node_features"][i][0], (1, 1))], axis=0)

        per_graph_results_all = tf.concat(
            [per_graph_results_all, per_graph_results_3], axis=1)
        # print(per_graph_results_all)

        per_graph_results = self._graph_repr_to_classification_layer(
            per_graph_results_all)  # Shape [G, 1]

        # fh change: return the raw per-graph outputs instead of squeezing:
        return per_graph_results

        # return tf.squeeze(per_graph_results, axis=-1)

    def compute_task_metrics(
        self,
        batch_features: Dict[str, tf.Tensor],
        task_output: Any,
        batch_labels: Dict[str, tf.Tensor],
    ) -> Dict[str, tf.Tensor]:
        # fh change: switched from binary to categorical cross-entropy:
        # ce = tf.reduce_mean(
        #     tf.keras.losses.binary_crossentropy(
        #         y_true=batch_labels["target_value"], y_pred=task_output, from_logits=False
        #     )
        # )
        ce = tf.reduce_mean(
            tf.keras.losses.categorical_crossentropy(
                y_true=batch_labels["target_value"],
                y_pred=task_output,
                from_logits=False))
        print("pred")
        for _ in task_output:
            print(_)

        # a=tf.math.round(task_output)
        # num_correct = tf.reduce_sum(
        #     tf.cast(
        #         tf.math.equal(batch_labels["target_value"], tf.math.round(task_output)), tf.int32
        #     )
        # )
        #fh change
        num_correct = tf.reduce_sum(
            tf.cast(
                tf.math.equal(
                    tf.math.argmax(batch_labels["target_value"], axis=1),
                    tf.math.argmax(task_output, axis=1)), tf.int32))
        num_graphs = tf.cast(batch_features["num_graphs_in_batch"], tf.float32)
        return {
            "loss": ce,
            "batch_acc": tf.cast(num_correct, tf.float32) / num_graphs,
            "num_correct": num_correct,
            "num_graphs": num_graphs,
        }

    def compute_epoch_metrics(self,
                              task_results: List[Any]) -> Tuple[float, str]:
        total_num_graphs = np.sum([batch_task_result["num_graphs"]
                                   for batch_task_result in task_results])
        total_num_correct = np.sum([batch_task_result["num_correct"]
                                    for batch_task_result in task_results])
        epoch_acc = tf.cast(total_num_correct, tf.float32) / total_num_graphs
        return -epoch_acc.numpy(), f"Accuracy = {epoch_acc.numpy():.3f}"
Example #13
class GraphBinaryClassificationTask(GraphTaskModel):
    @classmethod
    def get_default_hyperparameters(cls, mp_style: Optional[str] = None) -> Dict[str, Any]:
        super_params = super().get_default_hyperparameters(mp_style)
        these_hypers: Dict[str, Any] = {
            "graph_aggregation_num_heads": 16,
            "graph_aggregation_hidden_layers": [128],
            "graph_aggregation_dropout_rate": 0.2,
        }
        super_params.update(these_hypers)
        return super_params

    def __init__(self, params: Dict[str, Any], dataset: GraphDataset, name: str = None):
        super().__init__(params, dataset=dataset, name=name)
        self._node_to_graph_aggregation = None

    def build(self, input_shapes):
        with tf.name_scope(self._name):
            self._node_to_graph_repr_layer = WeightedSumGraphRepresentation(
                graph_representation_size=self._params["graph_aggregation_num_heads"],
                num_heads=self._params["graph_aggregation_num_heads"],
                scoring_mlp_layers=self._params["graph_aggregation_hidden_layers"],
                scoring_mlp_dropout_rate=self._params["graph_aggregation_dropout_rate"],
                transformation_mlp_layers=self._params["graph_aggregation_hidden_layers"],
                transformation_mlp_dropout_rate=self._params["graph_aggregation_dropout_rate"],
            )
            self._node_to_graph_repr_layer.build(
                NodesToGraphRepresentationInput(
                    node_embeddings=tf.TensorShape(
                        (None, input_shapes["node_features"][-1] + self._params["gnn_hidden_dim"])
                    ),
                    node_to_graph_map=tf.TensorShape((None,)),
                    num_graphs=tf.TensorShape(()),
                )
            )

            self._graph_repr_to_classification_layer = tf.keras.layers.Dense(
                units=1, activation=tf.nn.sigmoid, use_bias=True
            )
            # NOTE: adjust the input width below (num_heads * 2 here) to match your setup.
            self._graph_repr_to_classification_layer.build(
                tf.TensorShape((None, self._params["graph_aggregation_num_heads"]*2))
            )
        super().build(input_shapes)

    def compute_task_output(
        self,
        batch_features: Dict[str, tf.Tensor],
        final_node_representations: tf.Tensor,
        training: bool,
    ) -> Any:
        per_graph_results = self._node_to_graph_repr_layer(
            NodesToGraphRepresentationInput(
                node_embeddings=tf.concat(
                    [batch_features["node_features"], final_node_representations], axis=-1
                ),
                node_to_graph_map=batch_features["node_to_graph_map"],
                num_graphs=batch_features["num_graphs_in_batch"],
            )
        )  # Shape [G, graph_aggregation_num_heads]
        per_graph_results = self._graph_repr_to_classification_layer(
            per_graph_results
        )  # Shape [G, 1]

        return tf.squeeze(per_graph_results, axis=-1)

    # New compute method combining two graph batches:
    def compute_task_output_new(
        self,
        batch_features: Dict[str, tf.Tensor],
        final_node_representations: tf.Tensor,
        batch_features_2: Dict[str, tf.Tensor],
        final_node_representations_2: tf.Tensor,
        training: bool,
    ) -> Any:
        per_graph_results_1 = self._node_to_graph_repr_layer(
            NodesToGraphRepresentationInput(
                node_embeddings=tf.concat(
                    [batch_features["node_features"], final_node_representations], axis=-1
                ),
                node_to_graph_map=batch_features["node_to_graph_map"],
                num_graphs=batch_features["num_graphs_in_batch"],
            )
        )  # Shape [G, graph_aggregation_num_heads]
        #second
        per_graph_results_2 = self._node_to_graph_repr_layer(
            NodesToGraphRepresentationInput(
                node_embeddings=tf.concat(
                    [batch_features_2["node_features"], final_node_representations_2], axis=-1
                ),
                node_to_graph_map=batch_features_2["node_to_graph_map"],
                num_graphs=batch_features_2["num_graphs_in_batch"],
            )
        )  # Shape [G, graph_aggregation_num_heads]
        # concat
        per_graph_results_all = tf.concat([per_graph_results_1, per_graph_results_2], axis=1)

        per_graph_results = self._graph_repr_to_classification_layer(
            per_graph_results_all
        )  # Shape [G, 1]

        return tf.squeeze(per_graph_results, axis=-1)

    def compute_task_metrics(
        self,
        batch_features: Dict[str, tf.Tensor],
        task_output: Any,
        batch_labels: Dict[str, tf.Tensor],
    ) -> Dict[str, tf.Tensor]:
        ce = tf.reduce_mean(
            tf.keras.losses.binary_crossentropy(
                y_true=batch_labels["target_value"], y_pred=task_output, from_logits=False
            )
        )
        # Confusion-matrix counts. A positive label with a wrong prediction is
        # a false negative; a negative label with a wrong prediction is a
        # false positive.
        TP = 0
        FP = 0
        TN = 0
        FN = 0
        for (i, j) in zip(batch_labels["target_value"], tf.math.round(task_output)):
            if i == 1:
                if i == j:
                    TP += 1
                else:
                    FN += 1
            elif i == 0:
                if i == j:
                    TN += 1
                else:
                    FP += 1

        # accuracy
        num_correct = tf.reduce_sum(
            tf.cast(
                tf.math.equal(batch_labels["target_value"], tf.math.round(task_output)), tf.int32
            )
        )
        num_graphs = tf.cast(batch_features["num_graphs_in_batch"], tf.float32)

        try:
            ACC = (TP + TN) / num_graphs
            Precision = TP / (TP + FP)
            Recall = TP / (TP + FN)
            F1 = 2 * TP / (2 * TP + FN + FP)
            TPR = TP / (TP + FN)
            FPR = FP / (FP + TN)
            TNR = TN / (FP + TN)
            FNR = FN / (FN + TP)
        except ZeroDivisionError:
            ACC = 0
            Precision = 0
            Recall = 0
            F1 = 0
            TPR = 0
            FPR = 0
            TNR = 0
            FNR = 0
            print("Division by zero in batch metrics; defaulting to 0.")

        return {
            "loss": ce,
            "batch_acc": tf.cast(num_correct, tf.float32) / num_graphs,
            "batch_precision": Precision,
            "batch_recall": Recall,
            "batch_f1": F1,
            "batch_TPR": TPR,
            "batch_FPR": FPR,
            "batch_TNR": TNR,
            "batch_FNR": FNR,
            "num_correct": num_correct,
            "num_graphs": num_graphs,

        }

    def compute_epoch_metrics(self, task_results: List[Any]) -> Tuple[float, str]:
        total_num_graphs = np.sum(
            [batch_task_result["num_graphs"] for batch_task_result in task_results]
        )
        total_num_correct = np.sum(
            [batch_task_result["num_correct"] for batch_task_result in task_results]
        )
        epoch_acc = tf.cast(total_num_correct, tf.float32) / total_num_graphs
        # NOTE: the following are taken from the first batch only, not
        # aggregated over the epoch:
        epoch_precision = task_results[0]["batch_precision"]
        epoch_recall = task_results[0]["batch_recall"]
        epoch_f1 = task_results[0]["batch_f1"]
        epoch_TPR = task_results[0]["batch_TPR"]
        epoch_FPR = task_results[0]["batch_FPR"]
        epoch_TNR = task_results[0]["batch_TNR"]
        epoch_FNR = task_results[0]["batch_FNR"]

        return -epoch_acc.numpy(), f"Accuracy = {epoch_acc.numpy():.3f}",\
               epoch_precision, f"precision = {epoch_precision:.3f}",epoch_recall, f"recall = {epoch_recall:.3f}"\
            ,epoch_f1, f"f1 = {epoch_f1:.3f}",epoch_TPR, f"TPR = {epoch_TPR:.3f}",epoch_FPR, f"FPR = {epoch_FPR:.3f}",\
               epoch_TNR, f"TNR = {epoch_TNR:.3f}",epoch_FNR, f"FNR = {epoch_FNR:.3f}"