def call(self, inputs, **kwargs):
    """Forward pass: pool weighted edge features onto their receiving nodes.

    Args:
        inputs (list): of [node, edges, edge_index, weights]

            - nodes (tf.ragged): Node features of shape (batch, [N], F)
            - edges (tf.ragged): Edge or message features of shape (batch, [M], F)
            - edge_index (tf.ragged): Edge indices of shape (batch, [M], 2)
            - weights (tf.ragged): Edge or message weights. Must broadcast to edges
              or messages, e.g. (batch, [M], 1)

    Returns:
        features: Pooled feature tensor of pooled edge features for each node of
            shape (batch, [N], F)
    """
    dyn_inputs = self._kgcnn_map_input_ragged(inputs, 4)
    # Work on the flat value tensors plus their ragged partition info.
    node_values, node_splits = dyn_inputs[0].values, dyn_inputs[0].row_splits
    edge_values, _ = dyn_inputs[1].values, dyn_inputs[1].row_lengths()
    index_values, edge_lengths = dyn_inputs[2].values, dyn_inputs[2].row_lengths()
    weight_values, _ = dyn_inputs[3].values, dyn_inputs[3].row_lengths()

    # Shift per-graph edge indices into batch-wide node indices.
    batch_index = kgcnn_ops_change_edge_tensor_indexing_by_row_partition(
        index_values, node_splits, edge_lengths,
        partition_type_node="row_splits",
        partition_type_edge="row_length",
        to_indexing='batch', from_indexing=self.node_indexing)

    messages = edge_values * weight_values
    receivers = batch_index[:, 0]  # First index column, e.g. ingoing node.

    if not self.is_sorted:
        # Segment ops expect ascending segment ids; stable-sort edges by receiver.
        order = tf.argsort(receivers, axis=0, direction='ASCENDING', stable=True)
        receivers = tf.gather(receivers, order, axis=0)
        messages = tf.gather(messages, order, axis=0)
        weight_values = tf.gather(weight_values, order, axis=0)

    # Pooling via e.g. segment_sum.
    pooled = kgcnn_ops_segment_operation_by_name(self.pooling_method, messages, receivers)
    if self.normalize_by_weights:
        # Safe division; receivers with zero total weight yield zero instead of NaN.
        pooled = tf.math.divide_no_nan(pooled, tf.math.segment_sum(weight_values, receivers))
    if self.has_unconnected:
        # Pad to the full node count in case trailing nodes receive no edges.
        pooled = kgcnn_ops_scatter_segment_tensor_nd(pooled, receivers, tf.shape(node_values))

    return self._kgcnn_map_output_ragged([pooled, node_splits], "row_splits", 0)
def call(self, inputs, **kwargs):
    """Forward pass.

    The tensor representation can be tf.RaggedTensor, tf.Tensor or a list of (values, partition).
    The RaggedTensor has shape (batch, None, F) or in case of equal sized graphs (batch, N, F).
    For disjoint representation (values, partition), the node embeddings are given by a flatten
    value tensor of shape (batch*None, F) and a partition tensor of either "row_length",
    "row_splits" or "value_rowids" that matches the tf.RaggedTensor partition information. In
    this case the partition_type and node_indexing scheme, i.e. "batch", must be known by the
    layer. For edge indices, the last dimension holds indices from outgoing to ingoing node
    (i,j) as a directed edge.

    Args:
        inputs (list): of [node, edges, edge_index]

            - nodes: Node features of shape (batch, [N], F)
            - edges: Edge or message features of shape (batch, [M], F)
            - edge_index: Edge indices of shape (batch, [M], 2)

    Returns:
        features: Pooled feature tensor of pooled edge features for each node.
    """
    # Resolve the representation each input arrives in.
    found_node_type = kgcnn_ops_get_tensor_type(inputs[0], input_tensor_type=self.input_tensor_type,
                                                node_indexing=self.node_indexing)
    found_edge_type = kgcnn_ops_get_tensor_type(inputs[1], input_tensor_type=self.input_tensor_type,
                                                node_indexing=self.node_indexing)
    found_index_type = kgcnn_ops_get_tensor_type(inputs[2], input_tensor_type=self.input_tensor_type,
                                                 node_indexing=self.node_indexing)

    # Cast everything to flat (values, partition) representation.
    nod, node_part = kgcnn_ops_dyn_cast(inputs[0], input_tensor_type=found_node_type,
                                        output_tensor_type="values_partition",
                                        partition_type=self.partition_type)
    edge, _ = kgcnn_ops_dyn_cast(inputs[1], input_tensor_type=found_edge_type,
                                 output_tensor_type="values_partition",
                                 partition_type=self.partition_type)
    edgeind, edge_part = kgcnn_ops_dyn_cast(inputs[2], input_tensor_type=found_index_type,
                                            output_tensor_type="values_partition",
                                            partition_type=self.partition_type)

    # Shift per-graph edge indices to batch-wide node indices.
    shiftind = kgcnn_ops_change_edge_tensor_indexing_by_row_partition(
        edgeind, node_part, edge_part,
        partition_type_node=self.partition_type,
        partition_type_edge=self.partition_type,
        to_indexing='batch', from_indexing=self.node_indexing)

    nodind = shiftind[:, 0]  # Pick first index eg. ingoing
    dens = edge
    if not self.is_sorted:
        # Segment ops expect ascending segment ids; stable-sort edges by receiver.
        node_order = tf.argsort(nodind, axis=0, direction='ASCENDING', stable=True)
        nodind = tf.gather(nodind, node_order, axis=0)
        dens = tf.gather(dens, node_order, axis=0)

    # Pooling via e.g. segment_sum.
    out = kgcnn_ops_segment_operation_by_name(self.pooling_method, dens, nodind)
    if self.has_unconnected:
        # Pad to full node count in case trailing nodes receive no edges.
        out = kgcnn_ops_scatter_segment_tensor_nd(out, nodind, tf.shape(nod))

    # Cast back to the representation found for the node input.
    return kgcnn_ops_dyn_cast([out, node_part], input_tensor_type="values_partition",
                              output_tensor_type=found_node_type,
                              partition_type=self.partition_type)
def call(self, inputs, **kwargs):
    """Forward pass.

    Args:
        inputs: [node, edges, attention, edge_indices]

            - nodes (tf.ragged): Node features of shape (batch, [N], F)
            - edges (tf.ragged): Edge or message features of shape (batch, [M], F)
            - attention (tf.ragged): Attention coefficients of shape (batch, [M], 1)
            - edge_index (tf.ragged): Edge indices of shape (batch, [M], 2)

    Returns:
        embeddings: Feature tensor of pooled edge attentions for each node of
            shape (batch, [N], F)
    """
    dyn_inputs = self._kgcnn_map_input_ragged(inputs, 4)
    # We cast to flat values plus partition tensors here.
    nod, node_part = dyn_inputs[0].values, dyn_inputs[0].row_lengths()
    edge = dyn_inputs[1].values
    attention = dyn_inputs[2].values
    edgeind, edge_part = dyn_inputs[3].values, dyn_inputs[3].row_lengths()

    # Shift per-graph edge indices to batch-wide node indices.
    shiftind = kgcnn_ops_change_edge_tensor_indexing_by_row_partition(
        edgeind, node_part, edge_part,
        partition_type_node="row_length", partition_type_edge="row_length",
        to_indexing='batch', from_indexing=self.node_indexing)

    nodind = shiftind[:, 0]  # Pick first index eg. ingoing
    dens = edge
    ats = attention
    if not self.is_sorted:
        # Segment ops expect ascending segment ids; stable-sort edges by receiver.
        node_order = tf.argsort(nodind, axis=0, direction='ASCENDING', stable=True)
        nodind = tf.gather(nodind, node_order, axis=0)
        dens = tf.gather(dens, node_order, axis=0)
        ats = tf.gather(ats, node_order, axis=0)

    # Apply segmented softmax so attention normalizes per receiving node.
    ats = segment_softmax(ats, nodind)
    get = dens * ats
    get = tf.math.segment_sum(get, nodind)

    if self.has_unconnected:
        # Need to fill tensor since the maximum node may not be also in pooled
        # Does not happen if all nodes are also connected
        get = kgcnn_ops_scatter_segment_tensor_nd(get, nodind, tf.shape(nod))

    return self._kgcnn_map_output_ragged([get, node_part], "row_length", 0)
def call(self, inputs, **kwargs):
    """Forward pass: pool each node's incoming edge features with an LSTM.

    Args:
        inputs (list): [nodes, edges, edge_index]

            - nodes (tf.ragged): Node features of shape (batch, [N], F)
            - edges (tf.ragged): Edge or message features of shape (batch, [M], F)
            - edge_index (tf.ragged): Edge indices of shape (batch, [M], 2)

    Returns:
        features: Feature tensor of pooled edge features for each node of
            shape (batch, [N], F)
    """
    dyn_inputs = self._kgcnn_map_input_ragged(inputs, 3)
    # Work on the flat value tensors plus their ragged partition info.
    node_values, node_splits = dyn_inputs[0].values, dyn_inputs[0].row_splits
    edge_values, _ = dyn_inputs[1].values, dyn_inputs[1].row_lengths()
    index_values, edge_lengths = dyn_inputs[2].values, dyn_inputs[2].row_lengths()

    # Shift per-graph edge indices into batch-wide node indices.
    batch_index = kgcnn_ops_change_edge_tensor_indexing_by_row_partition(
        index_values, node_splits, edge_lengths,
        partition_type_node="row_splits", partition_type_edge="row_length",
        to_indexing='batch', from_indexing=self.node_indexing)

    receivers = batch_index[:, 0]  # First index column, e.g. ingoing node.
    messages = edge_values
    if not self.is_sorted:
        # Group messages by receiving node via a stable ascending sort.
        order = tf.argsort(receivers, axis=0, direction='ASCENDING', stable=True)
        receivers = tf.gather(receivers, order, axis=0)
        messages = tf.gather(messages, order, axis=0)

    # Pooling via LSTM over a ragged (node, edges-per-node, F) input.
    ragged_lstm_input = tf.RaggedTensor.from_value_rowids(messages, receivers)
    pooled = self.lstm_unit(ragged_lstm_input)
    if self.has_unconnected:
        # Need to fill tensor since the maximum node may not be also in pooled
        # Does not happen if all nodes are also connected
        pooled = kgcnn_ops_scatter_segment_tensor_nd(pooled, receivers, tf.shape(node_values))

    return self._kgcnn_map_output_ragged([pooled, node_splits], "row_splits", 0)
def call(self, inputs, **kwargs):
    """Forward pass: aggregate edge features onto their receiving nodes.

    Args:
        inputs (list): of [node, edges, edge_index]

            - nodes (tf.ragged): Node features of shape (batch, [N], F)
            - edges (tf.ragged): Edge or message features of shape (batch, [M], F)
            - edge_index (tf.ragged): Edge indices of shape (batch, [M], 2)

    Returns:
        features: Pooled feature tensor of pooled edge features for each node.
    """
    dyn_inputs = self._kgcnn_map_input_ragged(inputs, 3)
    # Work on the flat value tensors plus their ragged partition info.
    node_values, node_splits = dyn_inputs[0].values, dyn_inputs[0].row_splits
    edge_values, _ = dyn_inputs[1].values, dyn_inputs[1].row_lengths()
    index_values, edge_lengths = dyn_inputs[2].values, dyn_inputs[2].row_lengths()

    # Shift per-graph edge indices into batch-wide node indices.
    batch_index = kgcnn_ops_change_edge_tensor_indexing_by_row_partition(
        index_values, node_splits, edge_lengths,
        partition_type_node="row_splits", partition_type_edge="row_length",
        to_indexing='batch', from_indexing=self.node_indexing)

    receivers = batch_index[:, 0]  # First index column, e.g. ingoing node.
    messages = edge_values
    if not self.is_sorted:
        # Segment ops expect ascending segment ids; stable-sort edges by receiver.
        order = tf.argsort(receivers, axis=0, direction='ASCENDING', stable=True)
        receivers = tf.gather(receivers, order, axis=0)
        messages = tf.gather(messages, order, axis=0)

    # Pooling via e.g. segment_sum.
    pooled = kgcnn_ops_segment_operation_by_name(self.pooling_method, messages, receivers)
    if self.has_unconnected:
        # Pad to the full node count in case trailing nodes receive no edges.
        pooled = kgcnn_ops_scatter_segment_tensor_nd(pooled, receivers, tf.shape(node_values))

    return self._kgcnn_map_output_ragged([pooled, node_splits], "row_splits", 0)
def call(self, inputs, **kwargs):
    """Forward pass.

    The tensor representation can be tf.RaggedTensor, tf.Tensor or a list of (values, partition).
    The RaggedTensor has shape (batch, None, F) or in case of equal sized graphs (batch, N, F).
    For disjoint representation (values, partition), the node embeddings are given by a flatten
    value tensor of shape (batch*None, F) and a partition tensor of either "row_length",
    "row_splits" or "value_rowids" that matches the tf.RaggedTensor partition information. In
    this case the partition_type and node_indexing scheme, i.e. "batch", must be known by the
    layer. For edge indices, the last dimension holds indices from outgoing to ingoing node
    (i,j) as a directed edge.

    Args:
        inputs: [node, edges, attention, edge_indices]

            - nodes: Node features of shape (batch, [N], F)
            - edges: Edge or message features of shape (batch, [M], F)
            - attention: Attention coefficients of shape (batch, [M], 1)
            - edge_index: Edge indices of shape (batch, [M], 2)

    Returns:
        embeddings: Feature tensor of pooled edge attentions for each node.
    """
    # Resolve the representation each input arrives in.
    found_node_type = kgcnn_ops_get_tensor_type(
        inputs[0], input_tensor_type=self.input_tensor_type, node_indexing=self.node_indexing)
    found_edge_type = kgcnn_ops_get_tensor_type(
        inputs[1], input_tensor_type=self.input_tensor_type, node_indexing=self.node_indexing)
    found_att_type = kgcnn_ops_get_tensor_type(
        inputs[2], input_tensor_type=self.input_tensor_type, node_indexing=self.node_indexing)
    found_index_type = kgcnn_ops_get_tensor_type(
        inputs[3], input_tensor_type=self.input_tensor_type, node_indexing=self.node_indexing)

    # We cast to flat (values, partition) representation here.
    nod, node_part = kgcnn_ops_dyn_cast(
        inputs[0], input_tensor_type=found_node_type,
        output_tensor_type="values_partition", partition_type=self.partition_type)
    edge, _ = kgcnn_ops_dyn_cast(
        inputs[1], input_tensor_type=found_edge_type,
        output_tensor_type="values_partition", partition_type=self.partition_type)
    attention, _ = kgcnn_ops_dyn_cast(
        inputs[2], input_tensor_type=found_att_type,
        output_tensor_type="values_partition", partition_type=self.partition_type)
    edgeind, edge_part = kgcnn_ops_dyn_cast(
        inputs[3], input_tensor_type=found_index_type,
        output_tensor_type="values_partition", partition_type=self.partition_type)

    # Shift per-graph edge indices to batch-wide node indices.
    shiftind = kgcnn_ops_change_edge_tensor_indexing_by_row_partition(
        edgeind, node_part, edge_part,
        partition_type_node=self.partition_type, partition_type_edge=self.partition_type,
        to_indexing='batch', from_indexing=self.node_indexing)

    nodind = shiftind[:, 0]  # Pick first index eg. ingoing
    dens = edge
    ats = attention
    if not self.is_sorted:
        # Segment ops expect ascending segment ids; stable-sort edges by receiver.
        node_order = tf.argsort(nodind, axis=0, direction='ASCENDING', stable=True)
        nodind = tf.gather(nodind, node_order, axis=0)
        dens = tf.gather(dens, node_order, axis=0)
        ats = tf.gather(ats, node_order, axis=0)

    # Apply segmented softmax so attention normalizes per receiving node.
    ats = segment_softmax(ats, nodind)
    get = dens * ats
    get = tf.math.segment_sum(get, nodind)

    if self.has_unconnected:
        # Need to fill tensor since the maximum node may not be also in pooled
        # Does not happen if all nodes are also connected
        get = kgcnn_ops_scatter_segment_tensor_nd(get, nodind, tf.shape(nod))

    # Cast back to the representation found for the node input.
    return kgcnn_ops_dyn_cast([get, node_part], input_tensor_type="values_partition",
                              output_tensor_type=found_node_type,
                              partition_type=self.partition_type)