Example #1
def hinge(y_true: Tensor, y_pred: Tensor) -> Tensor:
    """Calculate the hinge loss between two tensors.

    This method can be used with TensorFlow tensors:
    ```python
    true = tf.constant([[-1,1,1,-1], [1,1,1,1], [-1,-1,1,-1], [1,-1,-1,-1]])
    pred = tf.constant([[0.1,0.9,0.05,0.05], [0.1,-0.2,0.0,-0.7], [0.0,0.15,0.8,0.05], [1.0,-1.0,-1.0,-1.0]])
    b = fe.backend.hinge(y_pred=pred, y_true=true)  # [0.8  1.2  0.85 0.  ]
    ```

    This method can be used with PyTorch tensors:
    ```python
    true = torch.tensor([[-1,1,1,-1], [1,1,1,1], [-1,-1,1,-1], [1,-1,-1,-1]])
    pred = torch.tensor([[0.1,0.9,0.05,0.05], [0.1,-0.2,0.0,-0.7], [0.0,0.15,0.8,0.05], [1.0,-1.0,-1.0,-1.0]])
    b = fe.backend.hinge(y_pred=pred, y_true=true)  # [0.8  1.2  0.85 0.  ]
    ```

    Args:
        y_true: Ground truth class labels, which should take values of 1 or -1.
        y_pred: Prediction score for each class, with the same shape as `y_true` (dtype: float32 or float16).

    Returns:
        The hinge loss between `y_true` and `y_pred`.

    Raises:
        ValueError: If `y_pred` is an unacceptable data type.
    """
    y_true = cast(y_true, 'float32')  # labels may arrive as integers; cast for float math
    return reduce_mean(clip_by_value(1.0 - y_true * y_pred, min_value=0), axis=-1)
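As a quick sanity check, the same computation can be reproduced in plain NumPy (a minimal sketch; the inputs are copied from the docstring example above):

```python
import numpy as np

true = np.array([[-1, 1, 1, -1], [1, 1, 1, 1], [-1, -1, 1, -1], [1, -1, -1, -1]], dtype=np.float32)
pred = np.array([[0.1, 0.9, 0.05, 0.05], [0.1, -0.2, 0.0, -0.7],
                 [0.0, 0.15, 0.8, 0.05], [1.0, -1.0, -1.0, -1.0]], dtype=np.float32)
# hinge = mean over the class axis of max(0, 1 - y_true * y_pred)
loss = np.mean(np.clip(1.0 - true * pred, 0.0, None), axis=-1)
print(loss)  # [0.8  1.2  0.85 0.  ]
```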
Example #2
    def forward(self, data: List[Tensor], state: Dict[str, Any]) -> Tensor:
        data, loss = data
        # Gradient of the loss with respect to the input data.
        grad = get_gradient(target=loss, sources=data, tape=state['tape'], retain_graph=self.retain_graph)
        # Perturb the input by epsilon along the sign of the gradient, then clip
        # the result back into the valid data range.
        adverse_data = clip_by_value(data + self.epsilon * sign(grad),
                                     min_value=self.clip_low or reduce_min(data),
                                     max_value=self.clip_high or reduce_max(data))
        return adverse_data
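This update is the classic FGSM (Fast Gradient Sign Method) step. A minimal NumPy sketch of the same arithmetic, with made-up data, gradient, and clip bounds:

```python
import numpy as np

# Made-up inputs for illustration: normalized pixel values and their loss gradient.
data = np.array([0.2, 0.5, 0.8], dtype=np.float32)
grad = np.array([-0.3, 0.0, 0.9], dtype=np.float32)
epsilon = 0.1
# Step along the gradient sign, then clip to [0, 1] (the role of clip_low / clip_high above).
adverse = np.clip(data + epsilon * np.sign(grad), 0.0, 1.0)
print(adverse)  # [0.1 0.5 0.9]
```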
Example #3
    def _convert_for_visualization(tensor: Tensor,
                                   tile: int = 99) -> np.ndarray:
        """Modify the range of data in a given input `tensor` to be appropriate for visualization.

        Args:
            tensor: Input masks, whose channel values are to be reduced by absolute value summation.
            tile: The percentile [0-100] used to set the max value of the image.

        Returns:
            A (batch X width X height) image after visualization clipping is applied.
        """
        # PyTorch tensors are channel-first, while TensorFlow/NumPy tensors are channel-last.
        if isinstance(tensor, torch.Tensor):
            channel_axis = 1
        else:
            channel_axis = -1
        flattened_mask = reduce_sum(abs(tensor),
                                    axis=channel_axis,
                                    keepdims=True)

        non_batch_axes = list(range(len(flattened_mask.shape)))[1:]  # every axis except batch

        vmax = percentile(flattened_mask,
                          tile,
                          axis=non_batch_axes,
                          keepdims=True)
        vmin = reduce_min(flattened_mask, axis=non_batch_axes, keepdims=True)

        return clip_by_value((flattened_mask - vmin) / (vmax - vmin), 0, 1)
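The effect of the percentile-based maximum is easiest to see on a small example. A minimal NumPy sketch with a made-up 1x2x3 mask containing one outlier (tile=80 is chosen only to make the numbers round):

```python
import numpy as np

mask = np.array([[[0.0, 2.0, 4.0], [6.0, 8.0, 100.0]]])     # one outlier at 100
vmax = np.percentile(mask, 80, axis=(1, 2), keepdims=True)  # 8.0 -- the outlier is ignored
vmin = np.min(mask, axis=(1, 2), keepdims=True)             # 0.0
print(np.clip((mask - vmin) / (vmax - vmin), 0, 1))
# [[[0.   0.25 0.5 ]
#   [0.75 1.   1.  ]]]
```

Had the plain maximum (100) been used instead, every in-range value would have been squashed below 0.1; the percentile keeps them visible.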
Example #4
    def _get_patch_coordinates(
            tensor: Tensor, x: Tensor, y: Tensor, lam: Tensor
    ) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor, Tensor]:
        """Randomly cut the patches from input images.

        If a patch is going to be pasted into another image, the combination ratio between the two images is defined
        by `lam`. The cropping region indicates where to cut out of the image, and `cut_x` & `cut_y` are used to
        calculate a cropping region whose aspect ratio is proportional to that of the original image.

        Args:
            tensor: The input value.
            x: X-coordinate of the patch center. A scalar tensor containing a value between 0 and 1, which is scaled
                to the range of the image width.
            y: Y-coordinate of the patch center. A scalar tensor containing a value between 0 and 1, which is scaled
                to the range of the image height.
            lam: Combination ratio between the two images; the larger the lambda value, the smaller the patch. A
                scalar tensor containing a value between 0 and 1.

        Returns:
            The X and Y coordinates of the cropped patch, along with the image width and height.
        """
        _, img_height, img_width = get_image_dims(tensor)

        # Patch center in pixel coordinates.
        cut_x = img_width * x
        cut_y = img_height * y
        # Each side is scaled by sqrt(1 - lam), so the patch covers (1 - lam) of the image area.
        cut_w = img_width * tensor_sqrt(1 - lam)
        cut_h = img_height * tensor_sqrt(1 - lam)
        bbox_x1 = cast(
            tensor_round(clip_by_value(cut_x - cut_w / 2, min_value=0)),
            "int32")
        bbox_x2 = cast(
            tensor_round(clip_by_value(cut_x + cut_w / 2,
                                       max_value=img_width)), "int32")
        bbox_y1 = cast(
            tensor_round(clip_by_value(cut_y - cut_h / 2, min_value=0)),
            "int32")
        bbox_y2 = cast(
            tensor_round(clip_by_value(cut_y + cut_h / 2,
                                       max_value=img_height)), "int32")
        return bbox_x1, bbox_x2, bbox_y1, bbox_y2, img_width, img_height
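The geometry becomes concrete with numbers plugged in. A minimal NumPy sketch (the 100x100 image size and the x, y, lam values are made up): because each side of the patch is scaled by sqrt(1 - lam), the box covers 1 - lam of the image area whenever it is not clipped at the border.

```python
import numpy as np

img_h, img_w = 100, 100           # made-up image size
x, y, lam = 0.5, 0.5, 0.75        # patch centered in the image; lam = 0.75
cut_w = img_w * np.sqrt(1 - lam)  # 50.0
cut_h = img_h * np.sqrt(1 - lam)  # 50.0
x1 = int(round(max(x * img_w - cut_w / 2, 0)))      # 25
x2 = int(round(min(x * img_w + cut_w / 2, img_w)))  # 75
y1 = int(round(max(y * img_h - cut_h / 2, 0)))      # 25
y2 = int(round(min(y * img_h + cut_h / 2, img_h)))  # 75
print((x2 - x1) * (y2 - y1) / (img_w * img_h))      # 0.25 == 1 - lam
```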