Example #1
def hinge(y_true: Tensor, y_pred: Tensor) -> Tensor:
    """Calculate the hinge loss between two tensors.

    This method can be used with TensorFlow tensors:
    ```python
    true = tf.constant([[-1,1,1,-1], [1,1,1,1], [-1,-1,1,-1], [1,-1,-1,-1]])
    pred = tf.constant([[0.1,0.9,0.05,0.05], [0.1,-0.2,0.0,-0.7], [0.0,0.15,0.8,0.05], [1.0,-1.0,-1.0,-1.0]])
    b = fe.backend.hinge(y_pred=pred, y_true=true)  # [0.8  1.2  0.85 0.  ]
    ```

    This method can be used with PyTorch tensors:
    ```python
    true = torch.tensor([[-1,1,1,-1], [1,1,1,1], [-1,-1,1,-1], [1,-1,-1,-1]])
    pred = torch.tensor([[0.1,0.9,0.05,0.05], [0.1,-0.2,0.0,-0.7], [0.0,0.15,0.8,0.05], [1.0,-1.0,-1.0,-1.0]])
    b = fe.backend.hinge(y_pred=pred, y_true=true)  # [0.8  1.2  0.85 0.  ]
    ```

    Args:
        y_true: Ground truth class labels which should take values of 1 or -1.
        y_pred: Prediction score for each class, with a shape like y_true. dtype: float32 or float16.

    Returns:
        The hinge loss between `y_true` and `y_pred`.

    Raises:
        ValueError: If `y_pred` is an unacceptable data type.
    """
    y_true = cast(y_true, 'float32')
    return reduce_mean(clip_by_value(1.0 - y_true * y_pred, min_value=0), axis=-1)
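The documented outputs can be re-derived by hand: per sample, the loss is the mean of `max(0, 1 - y_true * y_pred)` over the class axis. A minimal NumPy sketch of that arithmetic (standalone, not part of the library):
```python
import numpy as np

y_true = np.array([[-1, 1, 1, -1], [1, 1, 1, 1], [-1, -1, 1, -1], [1, -1, -1, -1]], dtype=np.float32)
y_pred = np.array([[0.1, 0.9, 0.05, 0.05], [0.1, -0.2, 0.0, -0.7], [0.0, 0.15, 0.8, 0.05], [1.0, -1.0, -1.0, -1.0]],
                  dtype=np.float32)
# max(0, 1 - y_true * y_pred), averaged over the class axis
loss = np.mean(np.clip(1.0 - y_true * y_pred, 0.0, None), axis=-1)
print(loss)  # [0.8  1.2  0.85 0.  ]
```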
Example #2
    def forward(self, data: Tensor, state: Dict[str, Any]) -> Tuple[Tensor, Tensor]:
        # Sample the mixing coefficient and the patch center, then compute the patch bounding box.
        lam = self.beta.sample()
        lam = maximum(lam, (1 - lam))
        cut_x = self.uniform.sample()
        cut_y = self.uniform.sample()
        bbox_x1, bbox_x2, bbox_y1, bbox_y2, width, height = self._get_patch_coordinates(data, cut_x, cut_y, lam=lam)
        if tf.is_tensor(data):
            # TensorFlow tensors do not support item assignment, so swap the patch by adding a zero-padded
            # difference between the rolled batch and the original batch (NHWC layout).
            rolled = roll(data, shift=1, axis=0)
            patches = rolled[:, bbox_y1:bbox_y2, bbox_x1:bbox_x2, :] - data[:, bbox_y1:bbox_y2, bbox_x1:bbox_x2, :]
            patches = tf.pad(patches, [[0, 0], [bbox_y1, height - bbox_y2], [bbox_x1, width - bbox_x2], [0, 0]],
                             mode="CONSTANT",
                             constant_values=0)
            data = data + patches
        else:
            # PyTorch tensors (NCHW layout) support item assignment, so swap the patch in place.
            rolled = roll(data, shift=1, axis=0)
            data[:, :, bbox_y1:bbox_y2, bbox_x1:bbox_x2] = rolled[:, :, bbox_y1:bbox_y2, bbox_x1:bbox_x2]
        # Adjust lambda to match the actual pixel ratio of the patch.
        lam = 1 - cast((bbox_x2 - bbox_x1) * (bbox_y2 - bbox_y1), dtype="float32") / (width * height)
        return data, lam
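The TensorFlow branch above avoids item assignment by adding a zero-padded difference between the rolled batch and the original batch. A minimal standalone sketch of that trick (not FastEstimator code, with a made-up 4x4 image and patch box) showing it is equivalent to an in-place swap:
```python
import tensorflow as tf

data = tf.reshape(tf.range(2 * 4 * 4, dtype=tf.float32), (2, 4, 4, 1))  # NHWC batch of two 4x4 images
y1, y2, x1, x2 = 1, 3, 0, 2                                             # hypothetical patch box
rolled = tf.roll(data, shift=1, axis=0)
patches = rolled[:, y1:y2, x1:x2, :] - data[:, y1:y2, x1:x2, :]
patches = tf.pad(patches, [[0, 0], [y1, 4 - y2], [x1, 4 - x2], [0, 0]], constant_values=0)
mixed = data + patches
# Inside the box the values come from the rolled batch; outside the box they are unchanged.
assert bool(tf.reduce_all(mixed[:, y1:y2, x1:x2, :] == rolled[:, y1:y2, x1:x2, :]))
assert bool(tf.reduce_all(mixed[:, y2:, :, :] == data[:, y2:, :, :]))
```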
Example #3
    @staticmethod
    def _get_patch_coordinates(
            tensor: Tensor, x: Tensor, y: Tensor, lam: Tensor
    ) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor, Tensor]:
        """Randomly cut the patches from input images.

        If patches are going to be pasted in other image, combination ratio between two images is defined by `lam`.
        Cropping region indicates where to drop out from the image and `cut_x` & `cut_y` are used to calculate cropping
        region whose aspect ratio is proportional to the original image.

        Args:
            tensor: The input value.
            lam: Combination ratio between two images. Larger the lambda value is smaller the patch would be. A
                scalar tensor containing value between 0 and 1.
            x: X-coordinate in image from which patch needs to be cropped. A scalar tensor containing value between 0
                and 1 which in turn is transformed in the range of image width.
            y: Y-coordinate in image from which patch needs to be cropped. A scalar tensor containing value between 0
                and 1 which in turn is transformed in the range of image height.

        Returns:
            The X and Y coordinates of the cropped patch along with width and height.
        """
        _, img_height, img_width = get_image_dims(tensor)

        cut_x = img_width * x
        cut_y = img_height * y
        cut_w = img_width * tensor_sqrt(1 - lam)
        cut_h = img_height * tensor_sqrt(1 - lam)
        bbox_x1 = cast(tensor_round(clip_by_value(cut_x - cut_w / 2, min_value=0)), "int32")
        bbox_x2 = cast(tensor_round(clip_by_value(cut_x + cut_w / 2, max_value=img_width)), "int32")
        bbox_y1 = cast(tensor_round(clip_by_value(cut_y - cut_h / 2, min_value=0)), "int32")
        bbox_y2 = cast(tensor_round(clip_by_value(cut_y + cut_h / 2, max_value=img_height)), "int32")
        return bbox_x1, bbox_x2, bbox_y1, bbox_y2, img_width, img_height
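A quick hand-worked check of the box math (plain Python with hypothetical numbers, not library code): for a 32x32 image with x = y = 0.5 and lam = 0.75, the patch side is 32 * sqrt(1 - 0.75) = 16, so the patch covers a quarter of the image and the pixel-ratio-adjusted lambda computed in `forward` stays 0.75:
```python
import math

img_w, img_h, x, y, lam = 32, 32, 0.5, 0.5, 0.75
cut_x, cut_y = img_w * x, img_h * y                                    # patch center: (16, 16)
cut_w, cut_h = img_w * math.sqrt(1 - lam), img_h * math.sqrt(1 - lam)  # 16 x 16 patch
x1, x2 = round(max(cut_x - cut_w / 2, 0)), round(min(cut_x + cut_w / 2, img_w))  # 8, 24
y1, y2 = round(max(cut_y - cut_h / 2, 0)), round(min(cut_y + cut_h / 2, img_h))  # 8, 24
adjusted_lam = 1 - ((x2 - x1) * (y2 - y1)) / (img_w * img_h)                     # 1 - 256/1024 = 0.75
print(x1, x2, y1, y2, adjusted_lam)  # 8 24 8 24 0.75
```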
Example #4
    def forward(self, data: Union[Tensor, List[Tensor]],
                state: Dict[str, Any]) -> Union[Tensor, List[Tensor]]:
        """Execute a randomly selected op from the list of `numpy_ops`.

        Args:
            data: The information to be passed to one of the wrapped operators.
            state: Information about the current execution context, for example {"mode": "train"}.

        Returns:
            The `data` after application of one of the available numpyOps.
        """
        idx = cast(self.prob_fn.sample(), dtype='int32')
        return self.invoke_fn(idx, data, state)
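The same selection pattern can be written without the framework: sample an index, then dispatch the data to the op at that index. A minimal sketch with a hypothetical `one_of` helper (the real class samples the index from `self.prob_fn` rather than Python's `random`):
```python
import random
from typing import Any, Callable, Dict, List

def one_of(ops: List[Callable[[Any, Dict[str, Any]], Any]], data: Any, state: Dict[str, Any]) -> Any:
    idx = random.randrange(len(ops))  # uniform choice over the wrapped ops
    return ops[idx](data, state)

# usage: apply either "+1" or "*2" with equal probability
print(one_of([lambda d, s: d + 1, lambda d, s: d * 2], data=3, state={"mode": "train"}))
```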
def convert_input_precision(tensor: Tensor) -> Tensor:
    """
        Adjust the input data precision based of environment precision.

        Args:
            tensor: The input value.

        Returns:
            The precision adjusted data(16 bit for mixed precision, 32 bit otherwise).

    """
    precision = 'float32'

    if mixed_precision.global_policy().compute_dtype == 'float16':
        precision = 'float16'

    return cast(tensor, precision)
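A short usage sketch, assuming the function above and its `cast` helper are importable and that TensorFlow/Keras mixed precision is available in the environment:
```python
import tensorflow as tf
from tensorflow.keras import mixed_precision

x = tf.ones((2, 2), dtype=tf.float64)
print(convert_input_precision(x).dtype)             # float32 under the default 'float32' policy

mixed_precision.set_global_policy('mixed_float16')  # compute dtype becomes float16
print(convert_input_precision(x).dtype)             # float16 once mixed precision is active
mixed_precision.set_global_policy('float32')        # restore the default policy
```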