Example #1
def call(
        self,
        inputs: Tuple[Union[tf.Tensor, List[tf.Tensor], tf.SparseTensor]],
    ) -> List[tf.Tensor]:
        """Runs the graph convolution and returns the convolved node features.

        Parameters
        ---------------------------
        inputs: Tuple[Union[tf.Tensor, List[tf.Tensor], tf.SparseTensor]]
            The adjacency matrix followed by the node feature tensors.
        """
        adjacency, node_features = inputs[0], inputs[1:]

        # Reuse the adjacency's column indices as lookup ids; the adjacency
        # values then act as per-neighbour weights in the sparse lookup below.
        ids = tf.SparseTensor(indices=adjacency.indices,
                              values=adjacency.indices[:, 1],
                              dense_shape=adjacency.dense_shape)

        return [
            self._l2_norm(
                dense(
                    embedding_ops.embedding_lookup_sparse_v2(
                        self._dropout_layer(node_feature),
                        ids,
                        adjacency,
                        combiner='mean')))
            for dense, node_feature in zip(self._dense_layers, node_features)
        ]
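The id/weight trick above is easier to see on concrete data. Below is a minimal, self-contained sketch of the mean-combiner neighbour aggregation, using hypothetical node features and adjacency and the public tf.nn / tf.SparseTensor aliases in place of the internal modules:

import tensorflow as tf

node_features = tf.constant([[1., 0.], [0., 1.], [2., 2.]])  # 3 nodes, 2 features
# Weighted adjacency; every row has at least one neighbour, as the op requires.
adjacency = tf.SparseTensor(indices=[[0, 1], [0, 2], [1, 0], [2, 2]],
                            values=[1., 1., 1., 1.],
                            dense_shape=[3, 3])
# Reuse the adjacency's column indices as lookup ids into node_features.
ids = tf.SparseTensor(indices=adjacency.indices,
                      values=adjacency.indices[:, 1],
                      dense_shape=adjacency.dense_shape)
# Row i of the result is the weighted mean of the features of i's neighbours.
aggregated = tf.nn.embedding_lookup_sparse(node_features, ids, adjacency,
                                           combiner='mean')

Because the adjacency values serve as the lookup weights, a weighted graph falls out for free: combiner='mean' computes a weighted average of the neighbour features.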
Example #2
    def call(self, inputs):
        if inputs.dtype.base_dtype != self._compute_dtype_object.base_dtype:
            inputs = math_ops.cast(inputs, dtype=self._compute_dtype_object)

        rank = inputs.shape.rank
        if rank == 2 or rank is None:
            # We use embedding_lookup_sparse as a more efficient matmul operation for
            # large sparse input tensors. The op will result in a sparse gradient, as
            # opposed to sparse_ops.sparse_tensor_dense_matmul which results in dense
            # gradients. This can lead to significant speedups, see b/171762937.
            if isinstance(inputs, sparse_tensor.SparseTensor):
                # We need to fill empty rows, as the op assumes at least one id per row.
                inputs, _ = sparse_ops.sparse_fill_empty_rows(inputs, 0)
                # We need to do some munging of our input to use the embedding lookup as
                # a matrix multiply. We split our input matrix into separate ids and
                # weights tensors. The values of the ids tensor should be the column
                # indices of our input matrix and the values of the weights tensor
                # can continue to be the actual matrix weights.
                # The column arrangement of ids and weights
                # will be summed over and does not matter. See the documentation for
                # sparse_ops.sparse_tensor_dense_matmul for a more detailed explanation
                # of the inputs to both ops.
                ids = sparse_tensor.SparseTensor(
                    indices=inputs.indices,
                    values=inputs.indices[:, 1],
                    dense_shape=inputs.dense_shape)
                weights = inputs
                outputs = embedding_ops.embedding_lookup_sparse_v2(
                    self.kernel * self.window, ids, weights, combiner='sum')
            else:
                outputs = gen_math_ops.MatMul(a=inputs,
                                              b=self.kernel * self.window)
        else:
            # Broadcast kernel to inputs.
            outputs = standard_ops.tensordot(inputs, self.kernel * self.window,
                                             [[rank - 1], [0]])
            # Reshape the output back to the original ndim of the input.
            if not context.executing_eagerly():
                shape = inputs.shape.as_list()
                output_shape = shape[:-1] + [self.kernel.shape[-1]]
                outputs.set_shape(output_shape)

        if self.use_bias:
            outputs = nn_ops.bias_add(outputs, self.bias)

        if self.activation is not None:
            outputs = self.activation(outputs)
        return outputs
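The comment block above claims the lookup is just a matmul in disguise; here is a small sketch that checks the equivalence directly, with hypothetical shapes and the public aliases for the internal ops:

import tensorflow as tf

kernel = tf.random.normal([4, 3])                   # [in_dim, out_dim]
x = tf.SparseTensor(indices=[[0, 1], [1, 0], [1, 3]],
                    values=[2., 1., 5.],
                    dense_shape=[2, 4])             # [batch, in_dim], no empty rows
ids = tf.SparseTensor(indices=x.indices,
                      values=x.indices[:, 1],       # column index of each value
                      dense_shape=x.dense_shape)
lookup = tf.nn.embedding_lookup_sparse(kernel, ids, x, combiner='sum')
matmul = tf.sparse.sparse_dense_matmul(x, kernel)
tf.debugging.assert_near(lookup, matmul)            # same values, sparse gradient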
Example #3
def _embedding_lookup_for_sparse_tensor(
        inp: sparse_tensor.SparseTensor,
        weight: Optional[sparse_tensor.SparseTensor],
        table: tf_variables.Variable,
        feature: tpu_embedding_v2_utils.FeatureConfig) -> ops.Tensor:
    """Embedding lookup for sparse tensor based on its feature config.

  Args:
    inp: a single SparseTensor input.
    weight: None or SparseTensor which has the same shape of the input.
    table: a table variable.
    feature: a feature config.

  Returns:
    Embedding lookup result.
  """
    if not feature.output_shape and feature.max_sequence_length > 0:
        batch_size = math_ops.cast(array_ops.shape(inp)[0], dtype=dtypes.int64)
        sparse_shape = array_ops.stack(
            [batch_size, feature.max_sequence_length], axis=0)
        # TPU Embedding truncates sequences to max_sequence_length, and if we
        # don't truncate, scatter_nd will error out if the index was out of
        # bounds.
        truncated_inp = sparse_ops.sparse_slice(inp,
                                                start=[0, 0],
                                                size=sparse_shape)

        dense_output_shape = array_ops.stack(
            [batch_size, feature.max_sequence_length, feature.table.dim],
            axis=0)
        return array_ops.scatter_nd(
            truncated_inp.indices,
            array_ops.gather(table.read_value(), truncated_inp.values),
            dense_output_shape)
    else:
        inp_rank = inp.dense_shape.get_shape()[0]
        if (not feature.validate_weights_and_indices and inp_rank is not None
                and inp_rank <= 2):
            return embedding_ops.embedding_lookup_sparse_v2(
                table, inp, sp_weights=weight, combiner=feature.table.combiner)
        else:
            return embedding_ops.safe_embedding_lookup_sparse_v2(
                table,
                inp,
                sparse_weights=weight,
                combiner=feature.table.combiner)
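A small sketch of the max_sequence_length branch above, with a hypothetical table and ids and the public tf.sparse / tf.scatter_nd aliases: slice the ids down to max_sequence_length, gather the table rows, and scatter them into a dense, zero-padded [batch, max_sequence_length, dim] output:

import tensorflow as tf

table = tf.constant([[0., 1.], [2., 3.], [4., 5.]])  # vocabulary of 3 ids, dim=2
max_sequence_length = 2
inp = tf.SparseTensor(indices=[[0, 0], [0, 1], [0, 2], [1, 0]],
                      values=[0, 1, 2, 2],
                      dense_shape=[2, 3])            # row 0 holds 3 ids
# Truncate so scatter_nd never sees an index beyond max_sequence_length.
truncated = tf.sparse.slice(inp, start=[0, 0], size=[2, max_sequence_length])
out = tf.scatter_nd(truncated.indices,
                    tf.gather(table, truncated.values),
                    shape=[2, max_sequence_length, 2])  # zero-padded sequences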
Example #4
def sparse_lookup():
    sp_ids = sparse_tensor.SparseTensor(
        indices=[[0, 0], [0, 1], [1, 0], [2, 2]],
        values=[0, 3, 4, 1],
        dense_shape=[3, 3])
    return embedding_ops.embedding_lookup_sparse_v2(sv, sp_ids, None)
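Note that sv above is a sharded variable created elsewhere in the original test. A self-contained variant with a plain variable standing in for it; with sp_weights=None every id is weighted equally, and the combiner defaults to 'mean':

import tensorflow as tf

params = tf.Variable(tf.random.normal([5, 4]))  # stand-in for the sharded sv
sp_ids = tf.SparseTensor(indices=[[0, 0], [0, 1], [1, 0], [2, 2]],
                         values=[0, 3, 4, 1],
                         dense_shape=[3, 3])
result = tf.nn.embedding_lookup_sparse(params, sp_ids, None)  # shape [3, 4]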
Example #5
def dense(inputs, kernel, bias=None, activation=None, dtype=None):
    """Densely connected NN layer op.

  Args:
    inputs: `tf.Tensor` or `tf.SparseTensor`. Inputs to operation.
    kernel: `tf.Variable`. Matrix kernel.
    bias: (Optional) `tf.Variable`. Bias to add to outputs.
    activation: (Optional) 1-argument callable. Activation function to apply to
      outputs.
    dtype: (Optional) `tf.DType`. Dtype to cast `inputs` to.

  Returns:
    `tf.Tensor`. Output of dense connection.
  """
    if dtype:
        if inputs.dtype.base_dtype != dtype.base_dtype:
            inputs = math_ops.cast(inputs, dtype=dtype)

    rank = inputs.shape.rank
    if rank == 2 or rank is None:
        # We use embedding_lookup_sparse as a more efficient matmul operation for
        # large sparse input tensors. The op will result in a sparse gradient, as
        # opposed to sparse_ops.sparse_tensor_dense_matmul which results in dense
        # gradients. This can lead to significant speedups, see b/171762937.
        if isinstance(inputs, sparse_tensor.SparseTensor):
            # We need to fill empty rows, as the op assumes at least one id per row.
            inputs, _ = sparse_ops.sparse_fill_empty_rows(inputs, 0)
            # We need to do some munging of our input to use the embedding lookup as a
            # matrix multiply. We split our input matrix into separate ids and weights
            # tensors. The values of the ids tensor should be the column indices of
            # our input matrix and the values of the weights tensor can continue to
            # be the actual matrix weights. The column arrangement of ids and weights
            # will be summed over and does not matter. See the documentation for
            # sparse_ops.sparse_tensor_dense_matmul for a more detailed explanation of the
            # inputs to both ops.
            ids = sparse_tensor.SparseTensor(indices=inputs.indices,
                                             values=inputs.indices[:, 1],
                                             dense_shape=inputs.dense_shape)
            weights = inputs
            outputs = embedding_ops.embedding_lookup_sparse_v2(kernel,
                                                               ids,
                                                               weights,
                                                               combiner="sum")
        else:
            outputs = gen_math_ops.MatMul(a=inputs, b=kernel)
    else:
        # Broadcast kernel to inputs.
        outputs = standard_ops.tensordot(inputs, kernel, [[rank - 1], [0]])
        # Reshape the output back to the original ndim of the input.
        if not context.executing_eagerly():
            shape = inputs.shape.as_list()
            output_shape = shape[:-1] + [kernel.shape[-1]]
            outputs.set_shape(output_shape)

    if bias is not None:
        outputs = nn_ops.bias_add(outputs, bias)

    if activation is not None:
        outputs = activation(outputs)

    return outputs
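A hedged usage sketch for the dense op above, with hypothetical shapes; it assumes dense and the internal modules it references are in scope. Row 1 of x is deliberately left empty to exercise the sparse_fill_empty_rows path:

import tensorflow as tf

kernel = tf.Variable(tf.random.normal([4, 3]))
bias = tf.Variable(tf.zeros([3]))
x = tf.SparseTensor(indices=[[0, 1], [2, 0]],  # row 1 has no entries
                    values=[1., 2.],
                    dense_shape=[3, 4])
y = dense(x, kernel, bias=bias, activation=tf.nn.relu)  # -> Tensor, shape [3, 3]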