def call(self, inputs, **kwargs):
    """Forward pass: pool all edge features of each graph into one vector.

    The input may be a tf.RaggedTensor, a padded tf.Tensor or a
    (values, partition) pair; the actual representation is detected at
    runtime. For the (values, partition) form the flat value tensor of
    shape (batch*None, F) is accompanied by a partition tensor of type
    "row_length", "row_splits" or "value_rowids", and the layer's
    partition_type and node_indexing (i.e. "batch") settings must match.
    For edge indices, the last dimension holds indices from outgoing to
    ingoing node (i, j) as a directed edge.

    Args:
        inputs: Edge features or message embeddings of shape (batch, [N], F).

    Returns:
        tf.tensor: Pooled edge feature list of shape (batch, F).
    """
    tensor_type = kgcnn_ops_get_tensor_type(
        inputs, input_tensor_type=self.input_tensor_type,
        node_indexing=self.node_indexing)
    edge_values, edge_partition = kgcnn_ops_dyn_cast(
        inputs, input_tensor_type=tensor_type,
        output_tensor_type="values_partition",
        partition_type=self.partition_type)
    # Segment ids per edge are required for the segment reduction below.
    graph_ids = kgcnn_ops_change_partition_type(
        edge_partition, self.partition_type, "value_rowids")
    # Reduce all edges of the same graph, e.g. via segment_sum.
    # Output already has the correct shape and type.
    return kgcnn_ops_segment_operation_by_name(
        self.pooling_method, edge_values, graph_ids)
def call(self, inputs, **kwargs):
    """Forward pass: pool weighted edge/message features onto their nodes.

    Args:
        inputs (list): of [node, edges, edge_index, weights]

            - nodes (tf.ragged): Node features of shape (batch, [N], F)
            - edges (tf.ragged): Edge or message features of shape (batch, [M], F)
            - edge_index (tf.ragged): Edge indices of shape (batch, [M], 2)
            - weights (tf.ragged): Edge or message weights. Must broadcast to
              edges or messages, e.g. (batch, [M], 1)

    Returns:
        features: Pooled edge features for each node of shape (batch, [N], F).
    """
    ragged_inputs = self._kgcnn_map_input_ragged(inputs, 4)
    # Work on the flat value tensors plus their row partitions.
    node_values, node_splits = ragged_inputs[0].values, ragged_inputs[0].row_splits
    edge_values, _ = ragged_inputs[1].values, ragged_inputs[1].row_lengths()
    index_values, edge_lengths = ragged_inputs[2].values, ragged_inputs[2].row_lengths()
    weight_values, _ = ragged_inputs[3].values, ragged_inputs[3].row_lengths()

    # Shift per-graph edge indices into a single disjoint "batch" indexing.
    batch_index = kgcnn_ops_change_edge_tensor_indexing_by_row_partition(
        index_values, node_splits, edge_lengths,
        partition_type_node="row_splits", partition_type_edge="row_length",
        to_indexing='batch', from_indexing=self.node_indexing)

    edge_weights = weight_values
    messages = edge_values * edge_weights
    receiver = batch_index[:, 0]

    if not self.is_sorted:
        # Segment ops need sorted segment ids; sort edges by receiving node.
        order = tf.argsort(receiver, axis=0, direction='ASCENDING', stable=True)
        receiver = tf.gather(receiver, order, axis=0)
        messages = tf.gather(messages, order, axis=0)
        edge_weights = tf.gather(edge_weights, order, axis=0)

    # Pooling via e.g. segment_sum.
    pooled = kgcnn_ops_segment_operation_by_name(self.pooling_method, messages, receiver)
    if self.normalize_by_weights:
        # Normalize by the summed weights per node; divide_no_nan guards zero sums.
        pooled = tf.math.divide_no_nan(pooled, tf.math.segment_sum(edge_weights, receiver))
    if self.has_unconnected:
        # Scatter back so nodes without incoming edges still get (zero) entries.
        pooled = kgcnn_ops_scatter_segment_tensor_nd(pooled, receiver, tf.shape(node_values))

    return self._kgcnn_map_output_ragged([pooled, node_splits], "row_splits", 0)
def call(self, inputs, **kwargs):
    """Forward pass: pool edge/message features onto their receiving nodes.

    Each input may be a tf.RaggedTensor, a padded tf.Tensor or a
    (values, partition) pair; the representation is detected at runtime.
    For (values, partition) pairs the flat value tensor of shape
    (batch*None, F) comes with a partition tensor of type "row_length",
    "row_splits" or "value_rowids", and the layer's partition_type and
    node_indexing (i.e. "batch") settings must match. For edge indices,
    the last dimension holds indices from outgoing to ingoing node (i, j)
    as a directed edge.

    Args:
        inputs (list): of [node, edges, edge_index]

            - nodes: Node features of shape (batch, [N], F)
            - edges: Edge or message features of shape (batch, [N], F)
            - edge_index: Edge indices of shape (batch, [N], 2)

    Returns:
        features: Pooled feature tensor of pooled edge features for each node.
    """
    node_type = kgcnn_ops_get_tensor_type(
        inputs[0], input_tensor_type=self.input_tensor_type,
        node_indexing=self.node_indexing)
    edge_type = kgcnn_ops_get_tensor_type(
        inputs[1], input_tensor_type=self.input_tensor_type,
        node_indexing=self.node_indexing)
    index_type = kgcnn_ops_get_tensor_type(
        inputs[2], input_tensor_type=self.input_tensor_type,
        node_indexing=self.node_indexing)

    node_values, node_part = kgcnn_ops_dyn_cast(
        inputs[0], input_tensor_type=node_type,
        output_tensor_type="values_partition", partition_type=self.partition_type)
    edge_values, _ = kgcnn_ops_dyn_cast(
        inputs[1], input_tensor_type=edge_type,
        output_tensor_type="values_partition", partition_type=self.partition_type)
    index_values, edge_part = kgcnn_ops_dyn_cast(
        inputs[2], input_tensor_type=index_type,
        output_tensor_type="values_partition", partition_type=self.partition_type)

    # Shift per-graph edge indices into one disjoint "batch" indexing.
    batch_index = kgcnn_ops_change_edge_tensor_indexing_by_row_partition(
        index_values, node_part, edge_part,
        partition_type_node=self.partition_type,
        partition_type_edge=self.partition_type,
        to_indexing='batch', from_indexing=self.node_indexing)

    receiver = batch_index[:, 0]  # Pick first index, e.g. the ingoing node.
    messages = edge_values
    if not self.is_sorted:
        # Segment ops need sorted segment ids; sort edges by receiving node.
        order = tf.argsort(receiver, axis=0, direction='ASCENDING', stable=True)
        receiver = tf.gather(receiver, order, axis=0)
        messages = tf.gather(messages, order, axis=0)

    # Pooling via e.g. segment_sum.
    pooled = kgcnn_ops_segment_operation_by_name(self.pooling_method, messages, receiver)
    if self.has_unconnected:
        # Ensure nodes without incoming edges receive (zero) entries.
        pooled = kgcnn_ops_scatter_segment_tensor_nd(pooled, receiver, tf.shape(node_values))

    return kgcnn_ops_dyn_cast(
        [pooled, node_part], input_tensor_type="values_partition",
        output_tensor_type=node_type, partition_type=self.partition_type)
def call(self, inputs, **kwargs):
    """Forward pass: pool edge/message features onto their receiving nodes.

    Args:
        inputs (list): of [node, edges, edge_index]

            - nodes (tf.ragged): Node features of shape (batch, [N], F)
            - edges (tf.ragged): Edge or message features of shape (batch, [M], F)
            - edge_index (tf.ragged): Edge indices of shape (batch, [M], 2)

    Returns:
        features: Pooled feature tensor of pooled edge features for each node.
    """
    ragged_inputs = self._kgcnn_map_input_ragged(inputs, 3)
    # Work on the flat value tensors plus their row partitions.
    node_values, node_splits = ragged_inputs[0].values, ragged_inputs[0].row_splits
    edge_values, _ = ragged_inputs[1].values, ragged_inputs[1].row_lengths()
    index_values, edge_lengths = ragged_inputs[2].values, ragged_inputs[2].row_lengths()

    # Shift per-graph edge indices into a single disjoint "batch" indexing.
    batch_index = kgcnn_ops_change_edge_tensor_indexing_by_row_partition(
        index_values, node_splits, edge_lengths,
        partition_type_node="row_splits", partition_type_edge="row_length",
        to_indexing='batch', from_indexing=self.node_indexing)

    receiver = batch_index[:, 0]  # Pick first index, e.g. the ingoing node.
    messages = edge_values
    if not self.is_sorted:
        # Segment ops need sorted segment ids; sort edges by receiving node.
        order = tf.argsort(receiver, axis=0, direction='ASCENDING', stable=True)
        receiver = tf.gather(receiver, order, axis=0)
        messages = tf.gather(messages, order, axis=0)

    # Pooling via e.g. segment_sum.
    pooled = kgcnn_ops_segment_operation_by_name(self.pooling_method, messages, receiver)
    if self.has_unconnected:
        # Ensure nodes without incoming edges receive (zero) entries.
        pooled = kgcnn_ops_scatter_segment_tensor_nd(pooled, receiver, tf.shape(node_values))

    return self._kgcnn_map_output_ragged([pooled, node_splits], "row_splits", 0)
def call(self, inputs, **kwargs):
    """Forward pass: pool all edges of each graph into one feature vector.

    Args:
        inputs (tf.ragged): Edge features or message embeddings of shape (batch, [M], F).

    Returns:
        tf.tensor: Pooled edge feature list of shape (batch, F).
    """
    ragged_edges = self._kgcnn_map_input_ragged([inputs], 1)[0]
    # Work on the flat values; value_rowids gives the graph id of each edge.
    edge_values = ragged_edges.values
    graph_ids = ragged_edges.value_rowids()
    # Reduce edges per graph, e.g. via segment_sum.
    # Output already has the correct shape and type.
    return kgcnn_ops_segment_operation_by_name(self.pooling_method, edge_values, graph_ids)
def call(self, inputs, **kwargs):
    """Forward pass: pool all nodes of each graph into one feature vector.

    Args:
        inputs (tf.ragged): Node features of shape (batch, [N], F).

    Returns:
        nodes (tf.tensor): Pooled node features of shape (batch, F).
    """
    ragged_nodes = self._kgcnn_map_input_ragged([inputs], 1)[0]
    # Work on the flat values; value_rowids gives the graph id of each node.
    node_values = ragged_nodes.values
    graph_ids = ragged_nodes.value_rowids()
    # Reduce nodes per graph, e.g. via segment_sum; output has correct shape.
    return kgcnn_ops_segment_operation_by_name(self.pooling_method, node_values, graph_ids)
def call(self, inputs, **kwargs):
    """Forward pass: pool weighted node features into one vector per graph.

    Args:
        inputs (list): of [node, weights]

            - nodes (tf.ragged): Node features of shape (batch, [N], F)
            - weights (tf.ragged): Node or message weights. Must broadcast to
              nodes, e.g. shape (batch, [N], 1)

    Returns:
        nodes (tf.tensor): Pooled node features of shape (batch, F).
    """
    ragged_inputs = self._kgcnn_map_input_ragged(inputs, 2)
    # Work on the flat values; value_rowids gives the graph id of each node.
    node_values, graph_ids = ragged_inputs[0].values, ragged_inputs[0].value_rowids()
    weight_values, _ = ragged_inputs[1].values, ragged_inputs[1].value_rowids()
    # Weight the node features before reduction; weights broadcast to nodes.
    weighted_nodes = tf.math.multiply(node_values, weight_values)
    # Reduce per graph, e.g. via segment_sum; output has correct shape.
    return kgcnn_ops_segment_operation_by_name(self.pooling_method, weighted_nodes, graph_ids)
def call(self, inputs, **kwargs):
    """Forward pass: pool weighted node features into one vector per graph.

    Each input may be a tf.RaggedTensor, a padded tf.Tensor or a
    (values, partition) pair; the representation is detected at runtime.
    For (values, partition) pairs the flat value tensor of shape
    (batch*None, F) comes with a partition tensor of type "row_length",
    "row_splits" or "value_rowids", and the layer's partition_type and
    node_indexing (i.e. "batch") settings must match. For edge indices,
    the last dimension holds indices from outgoing to ingoing node (i, j)
    as a directed edge.

    Args:
        inputs (list): of [node, weights]

            - nodes: Node features of shape (batch, [N], F)
            - weights: Edge or message weights. Must broadcast to nodes.

    Returns:
        nodes (tf.tensor): Pooled node features of shape (batch, F).
    """
    node_type = kgcnn_ops_get_tensor_type(
        inputs[0], input_tensor_type=self.input_tensor_type,
        node_indexing=self.node_indexing)
    weight_type = kgcnn_ops_get_tensor_type(
        inputs[1], input_tensor_type=self.input_tensor_type,
        node_indexing=self.node_indexing)

    node_values, node_partition = kgcnn_ops_dyn_cast(
        inputs[0], input_tensor_type=node_type,
        output_tensor_type="values_partition", partition_type=self.partition_type)
    weight_values, _ = kgcnn_ops_dyn_cast(
        inputs[1], input_tensor_type=weight_type,
        output_tensor_type="values_partition", partition_type=self.partition_type)

    # Segment ids per node are required for the segment reduction below.
    graph_ids = kgcnn_ops_change_partition_type(
        node_partition, self.partition_type, "value_rowids")
    # Weight the node features before reduction; weights broadcast to nodes.
    weighted_nodes = tf.math.multiply(node_values, weight_values)
    # Reduce per graph, e.g. via segment_sum; output has correct shape.
    return kgcnn_ops_segment_operation_by_name(self.pooling_method, weighted_nodes, graph_ids)