Example #1
    def __call__(self, x):
        """
        Returns the Clipped Rectified Linear activation

        Arguments:
            x (Tensor or optree): Input value

        Returns:
            Tensor or optree: output activation
        """
        if self.slope == 0:
            return ng.minimum(ng.maximum(x, 0), self.cutoff)
        else:
            return ng.minimum(
                ng.maximum(x, 0) + self.slope * ng.minimum(x, 0), self.cutoff)
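
For reference, the same clipped rectified linear computation can be written as a
standalone NumPy sketch (illustrative only; clipped_relu, slope, and cutoff are
hypothetical stand-ins for the layer attributes used above):

import numpy as np

def clipped_relu(x, slope=0.0, cutoff=6.0):
    # maximum(x, 0) keeps the positive part; slope * minimum(x, 0) adds the
    # leaky branch for negative inputs; the outer minimum caps output at cutoff.
    return np.minimum(np.maximum(x, 0) + slope * np.minimum(x, 0), cutoff)
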
Example #2
def clip_gradient_norm(grad_list, clip_norm=None):
    """
    Returns a scaling factor to apply to the gradients.

    The scaling factor is computed such that the global L2 norm of the
    scaled gradients across all layers is at most the provided clip_norm
    value. This factor is never greater than 1, so it never scales up
    the gradients.

    Arguments:
        grad_list (list): List of gradient tensors, one per layer
        clip_norm (float, optional): Target norm for the gradients. If not
                                     provided, the returned scale factor is 1.

    Returns:
        Computed scale factor (float)
    """
    if clip_norm is None:
        return 1
    else:
        s = None
        for param in grad_list:
            term = ng.squared_L2(param, out_axes=None)
            if s is None:
                s = term
            else:
                s = s + term

        s = ng.sqrt(s)
        return clip_norm / ng.maximum(s, clip_norm)
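
The loop above accumulates the squared L2 norms of every layer's gradient, so s
is the global L2 norm of all gradients taken together. A plain NumPy sketch of
the same scaling rule (grads is a hypothetical list of gradient arrays):

import numpy as np

def clip_gradient_norm_ref(grads, clip_norm=None):
    if clip_norm is None:
        return 1.0
    # Global L2 norm: square root of the total sum of squares across layers.
    norm = np.sqrt(sum(np.sum(g ** 2) for g in grads))
    # 1.0 when norm <= clip_norm, otherwise clip_norm / norm (< 1).
    return clip_norm / max(norm, clip_norm)
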
Example #3
    def __call__(self, x):
        """
        Returns the Exponential Linear activation

        Arguments:
            x (Tensor or optree): input value

        Returns:
            Tensor or optree: output activation
        """
        return ng.maximum(x, 0) + self.alpha * (ng.exp(ng.minimum(x, 0)) - 1)
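
The expression splits the ELU into its two branches: the identity for positive
inputs, and alpha * (exp(x) - 1), which saturates at -alpha, for negative
inputs. A minimal NumPy equivalent (alpha stands in for the layer attribute):

import numpy as np

def elu(x, alpha=1.0):
    # Positive branch is the identity; negative branch saturates at -alpha.
    return np.maximum(x, 0) + alpha * (np.exp(np.minimum(x, 0)) - 1)
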
Example #4
    def __call__(self, x):
        """
        Returns the Rectified Linear activation

        Arguments:
            x (Tensor or optree): Input value

        Returns:
            Tensor or optree: output activation
        """
        if self.slope == 0:
            return ng.relu(x, axes=x.axes)
        else:
            return ng.maximum(x, 0) + self.slope * ng.minimum(0, x)
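
This is the same piecewise function as Example #1 without the cutoff. A short
NumPy sketch covering both the plain (slope == 0) and leaky cases:

import numpy as np

def leaky_relu(x, slope=0.0):
    # slope == 0 reduces this to the plain rectifier max(x, 0).
    return np.maximum(x, 0) + slope * np.minimum(0, x)

# Example: leaky_relu(np.array([-2.0, 3.0]), slope=0.1) -> array([-0.2, 3.0])
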
Example #5
def clip_gradient_value(grad, clip_value=None):
    """
    Element-wise clip a gradient tensor to between ``-clip_value`` and ``+clip_value``.

    Arguments:
        grad (Tensor): Gradient tensor for a single layer
        clip_value (float, optional): Value to element-wise clip gradients. Default: no clipping

    Returns:
        Tensor: The clipped gradient tensor.
    """
    if clip_value is None:
        return grad
    else:
        return ng.minimum(ng.maximum(grad, -abs(clip_value)), abs(clip_value))
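
The nested minimum(maximum(...)) pattern is a standard element-wise clamp; in
plain NumPy the same operation is a single np.clip call (a reference sketch,
not part of the library):

import numpy as np

def clip_gradient_value_ref(grad, clip_value=None):
    if clip_value is None:
        return grad
    # Clamp every element into [-|clip_value|, +|clip_value|].
    return np.clip(grad, -abs(clip_value), abs(clip_value))
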
Example #6
def clip_weight_value(weight, clip_value=None, min_value_override=None):
    """
    Element-wise clip a weight tensor to between ``min_value_override`` and ``abs(clip_value)``.

    Arguments:
        weight (Tensor): Weight tensor for a single layer
        clip_value (float, optional): Value to element-wise clip weights. Default: no clipping
        min_value_override (float, optional): Minimum value for element-wise clipping
                                              of the weights. Default: -abs(clip_value)

    Returns:
        Tensor: The clipped weight tensor.
    """
    if clip_value is None:
        return weight
    else:
        if min_value_override is None:
            min_value_override = -abs(clip_value)
        return ng.minimum(ng.maximum(weight, min_value_override), abs(clip_value))
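
The weight version differs from Example #5 only in allowing an asymmetric lower
bound. A NumPy sketch with the same defaulting behavior (reference only):

import numpy as np

def clip_weight_value_ref(weight, clip_value=None, min_value_override=None):
    if clip_value is None:
        return weight
    if min_value_override is None:
        # The default lower bound mirrors the upper bound.
        min_value_override = -abs(clip_value)
    return np.clip(weight, min_value_override, abs(clip_value))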