Example #1
    def _weight_to_image(
            weight: Tensor,
            kernel_channels_last: bool = False) -> Optional[Tensor]:
        """Logs a weight as a TensorBoard image.

        Implementation from TensorFlow codebase, would have invoked theirs directly but they didn't make it a static
        method.
        """
        w_img = squeeze(weight)
        shape = backend.int_shape(w_img)
        if len(shape) == 1:  # Bias case
            w_img = reshape(w_img, [1, shape[0], 1, 1])
        elif len(shape) == 2:  # Dense layer kernel case
            if shape[0] > shape[1]:
                w_img = permute(w_img, [1, 0])  # Transpose so the image is wider than it is tall
                shape = backend.int_shape(w_img)
            w_img = reshape(w_img, [1, shape[0], shape[1], 1])
        elif len(shape) == 3:  # ConvNet case
            if kernel_channels_last:
                # Switch to channels_first to display every kernel as a separate image
                w_img = permute(w_img, [2, 0, 1])
            w_img = expand_dims(w_img, axis=-1)
        elif len(shape) == 4:  # Conv filter with multiple input channels
            if kernel_channels_last:
                # Switch to channels first to display kernels as separate images
                w_img = permute(w_img, [3, 2, 0, 1])
            w_img = reduce_sum(
                abs(w_img),
                axis=1)  # Sum over each input channel within the kernel
            w_img = expand_dims(w_img, axis=-1)
        shape = backend.int_shape(w_img)
        # Shapes which cannot be rendered as images (e.g. 3D convnets) fall through and return None
        if len(shape) == 4 and shape[-1] in [1, 3, 4]:
            return w_img
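For reference, the 4-D branch above turns a channels-last convolution kernel into a batch of single-channel images. Below is a minimal NumPy sketch of the same shape handling; the kernel shape and array names are illustrative, not part of the original API.

```python
import numpy as np

kernel = np.random.rand(3, 3, 8, 16)        # (h, w, in_channels, out_channels), channels-last
w_img = np.transpose(kernel, (3, 2, 0, 1))  # -> (out, in, h, w): one image per filter
w_img = np.sum(np.abs(w_img), axis=1)       # collapse input channels -> (out, h, w)
w_img = np.expand_dims(w_img, axis=-1)      # -> (out, h, w, 1): a batch of grayscale images
assert w_img.shape == (16, 3, 3, 1)         # last dim in {1, 3, 4}, so it can be displayed
```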
Example #2
    def forward(self, data: List[Tensor], state: Dict[str, Any]) -> List[Tensor]:
        results = []
        for elem in data:
            # L1 distance between each element and the stored label embeddings
            x = reduce_sum(abs(expand_dims(elem, axis=1) - self.labels), axis=-1)
            x = iwd(x, power=1.0, eps=self.eps)
            results.append(x)
        return results
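The loop above computes, for each input element, its L1 distance to a set of label embeddings and then converts those distances into scores via `iwd`. A rough NumPy sketch of that step follows; the label shape and the exact weighting are assumptions, with `iwd` taken here to mean weights proportional to 1 / (distance**power + eps), normalized per sample.

```python
import numpy as np

elem = np.random.rand(4, 8)     # (batch, embedding_dim), illustrative shapes
labels = np.random.rand(10, 8)  # (n_classes, embedding_dim), assumed shape of self.labels
dist = np.sum(np.abs(elem[:, None, :] - labels), axis=-1)  # L1 distance, (batch, n_classes)
weights = 1.0 / (dist ** 1.0 + 1e-6)                       # assumed inverse-weighted-distance form
probs = weights / np.sum(weights, axis=-1, keepdims=True)  # normalize per sample
```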
def gather_from_batch(tensor: Tensor, indices: Tensor) -> Tensor:
    """Gather specific indices from a batch of data.

    This method can be useful if you need to compute gradients based on a specific subset of a tensor's output values.
    The `indices` will automatically be cast to the correct type (tf, torch, np) based on the type of the `tensor`.

    This method can be used with Numpy data:
    ```python
    ind = np.array([1, 0, 1])
    n = np.array([[0, 1], [2, 3], [4, 5]])
    b = fe.backend.gather_from_batch(n, ind)  # [1, 2, 5]
    n = np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]], [[8, 9], [10, 11]]])
    b = fe.backend.gather_from_batch(n, ind)  # [[2, 3], [4, 5], [10, 11]]
    ```

    This method can be used with TensorFlow tensors:
    ```python
    ind = tf.constant([1, 0, 1])
    t = tf.constant([[0, 1], [2, 3], [4, 5]])
    b = fe.backend.gather_from_batch(t, ind)  # [1, 2, 5]
    t = tf.constant([[[0, 1], [2, 3]], [[4, 5], [6, 7]], [[8, 9], [10, 11]]])
    b = fe.backend.gather_from_batch(t, ind)  # [[2, 3], [4, 5], [10, 11]]
    ```

    This method can be used with PyTorch tensors:
    ```python
    ind = torch.tensor([1, 0, 1])
    p = torch.tensor([[0, 1], [2, 3], [4, 5]])
    b = fe.backend.gather_from_batch(p, ind)  # [1, 2, 5]
    p = torch.tensor([[[0, 1], [2, 3]], [[4, 5], [6, 7]], [[8, 9], [10, 11]]])
    b = fe.backend.gather_from_batch(p, ind)  # [[2, 3], [4, 5], [10, 11]]
    ```

    Args:
        tensor: A tensor of shape (batch, d1, ..., dn).
        indices: A tensor of shape (batch, ) or (batch, 1) indicating which indices should be selected.

    Returns:
        A tensor of shape (batch, d2, ..., dn) containing the elements from `tensor` at the given `indices`.

    Raises:
        ValueError: If `tensor` is an unacceptable data type.
    """
    if tf.is_tensor(tensor):
        indices = to_tensor(indices, 'tf')
        indices = tf.cast(indices, tf.int64)
        if len(indices.shape) == 1:  # Indices of shape (batch, ) need a trailing dim for gather_nd
            indices = expand_dims(indices, 1)
        return tf.gather_nd(tensor, indices=indices, batch_dims=1)
    elif isinstance(tensor, torch.Tensor):
        return tensor[torch.arange(tensor.shape[0]), squeeze(indices)]
    elif isinstance(tensor, np.ndarray):
        return tensor[np.arange(tensor.shape[0]), squeeze(indices)]
    else:
        raise ValueError("Unrecognized tensor type {}".format(type(tensor)))
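As a usage note, the "compute gradients based on a specific subset" case mentioned in the docstring could look like the following TensorFlow sketch. The standalone logits, the loss, and the `import fastestimator as fe` alias are illustrative assumptions, not taken from the example above.

```python
import tensorflow as tf
import fastestimator as fe

logits = tf.Variable([[2.0, 1.0], [0.5, 3.0], [1.5, 0.0]])  # stand-in for model outputs
labels = tf.constant([0, 1, 0])                              # true class index per sample
with tf.GradientTape() as tape:
    probs = tf.nn.softmax(logits)
    # Select each sample's true-class probability: shape (batch, )
    true_class_probs = fe.backend.gather_from_batch(probs, labels)
    loss = -tf.reduce_mean(tf.math.log(true_class_probs))
grads = tape.gradient(loss, logits)  # gradients flow only through the gathered entries
```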