Example #1
def test_minimum_bkwd(x, data):
    """ index conforms strictly to basic indexing """

    y = data.draw(hnp.arrays(shape=broadcastable_shape(x.shape, max_dim=5),
                             dtype=float,
                             elements=st.floats(-10., 10.)),
                  label="y")

    assume(not np.any(np.isclose(x, y)))

    x_arr = Tensor(np.copy(x))
    y_arr = Tensor(np.copy(y))
    o = minimum(x_arr, y_arr)

    grad = data.draw(hnp.arrays(shape=o.shape,
                                dtype=float,
                                elements=st.floats(1, 10),
                                unique=True),
                     label="grad")
    (o * grad).sum().backward()

    dx, dy = numerical_gradient_full(np.minimum,
                                     x,
                                     y,
                                     back_grad=grad,
                                     as_decimal=True)

    assert_allclose(x_arr.grad, dx)
    assert_allclose(y_arr.grad, dy)
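The excerpt above omits the Hypothesis decorator and imports that supply `x` and `data` (and `broadcastable_shape` / `numerical_gradient_full` come from MyGrad's own test utilities). A minimal sketch of how such a property-based test might be wired up, with the strategies for `x` assumed rather than taken from the original suite:

# Assumed wiring -- the original decorator and strategy choices are not shown
# in the excerpt, so the shapes/bounds below are illustrative only.
import hypothesis.strategies as st
import hypothesis.extra.numpy as hnp
from hypothesis import given

@given(
    x=hnp.arrays(
        shape=hnp.array_shapes(max_dims=5),  # arbitrary small shapes for x
        dtype=float,
        elements=st.floats(-10.0, 10.0),
    ),
    data=st.data(),  # lets the body draw `y` and `grad` interactively, as above
)
def test_minimum_bkwd(x, data):
    ...  # body as in Example #1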
Example #2
def test_minimum_bkwd_equal():
    """ regression test for documented behavior of minimum/minimum where
        x == y"""

    x = Tensor([1.0, 0.0, 2.0])
    y = Tensor([2.0, 0.0, 1.0])

    o = minimum(x, y)
    o.backward()

    assert_allclose(x.grad, [1.0, 0.0, 0.0])
    assert_allclose(y.grad, [0.0, 0.0, 1.0])
    o.null_gradients()
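The gradient routing that this regression test pins down can be written with plain NumPy masks; the sketch below illustrates the documented behavior (ties pass gradient to neither input) and is not MyGrad's actual implementation.

# Illustration only: strict comparisons reproduce the asserted gradients.
import numpy as np

x = np.array([1.0, 0.0, 2.0])
y = np.array([2.0, 0.0, 1.0])
grad = np.ones_like(x)   # incoming gradient, as from o.backward()

dx = grad * (x < y)      # x receives gradient only where it is strictly the minimum
dy = grad * (y < x)      # y receives gradient only where it is strictly the minimum
# ties (x == y) route gradient to neither input:
# dx -> [1., 0., 0.]   dy -> [0., 0., 1.]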
Example #3
def leaky_relu(x, slope, constant=False):
    """ Returns the leaky rectified linear activation elementwise along x.

    The leaky ReLU is given by `max(x, 0) + slope*min(x, 0)`.

    Parameters
    ----------
    x : mygrad.Tensor
        Input data.

    slope : Union[Real, mygrad.Tensor]
        The slope of the negative activation.

    constant : boolean, optional (default=False)
        If ``True``, the returned tensor is a constant (it
        does not back-propagate a gradient).

    Returns
    -------
    mygrad.Tensor
        The result of applying the "leaky relu" function elementwise to `x`.

    Examples
    --------
    >>> import mygrad as mg
    >>> from mygrad.nnet.activations import leaky_relu
    >>> x = mg.arange(-5, 6)
    >>> x
    Tensor([-5, -4, -3, -2, -1,  0,  1,  2,  3,  4,  5])
    >>> y = leaky_relu(x, slope=0.1); y
    Tensor([-0.5, -0.4, -0.3, -0.2, -0.1,  0. ,  1. ,  2. ,  3. ,  4. ,  5. ])
    >>> y.backward()
    >>> x.grad
    array([0.1, 0.1, 0.1, 0.1, 0.1, 0. , 1. , 1. , 1. , 1. , 1. ])
    """
    if isinstance(slope, (ndarray, Tensor)):
        slope = slope.item()

    if not isinstance(slope, Real):
        raise TypeError(
            f"`slope` must be a real-valued scalar, got {slope} (type { type(slope)})"
        )

    return maximum(
        x, 0, constant=constant) + slope * minimum(x, 0, constant=constant)
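As a quick sanity check, the `max(x, 0) + slope*min(x, 0)` formula from the docstring agrees with the usual `np.where` formulation. The snippet below assumes `mygrad` is installed and uses only the API already shown in the docstring above (plus `Tensor.data`, the underlying ndarray).

import numpy as np
import mygrad as mg
from mygrad.nnet.activations import leaky_relu

x = mg.arange(-3, 4)                               # Tensor([-3, -2, -1, 0, 1, 2, 3])
out = leaky_relu(x, slope=0.1)
ref = np.where(x.data >= 0, x.data, 0.1 * x.data)  # where-form of the same activation
assert np.allclose(out.data, ref)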
Example #4
def leaky_relu(x, slope):
    ''' Returns the leaky rectified linear activation elementwise along x. The leaky ReLU is given
    by max(x, 0) + slope*min(x, 0).

    Parameters
    ----------
    x : mygrad.Tensor
        Input data.

    slope : Union[Real, mygrad.Tensor]
        The slope of the negative activation.

    Returns
    -------
    mygrad.Tensor
        The leaky-rectified `x` (elementwise max(x, 0) + slope*min(x, 0)).
    '''
    return maximum(x, 0) + slope * minimum(x, 0)
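A minimal gradient check for this simpler variant, built only from `maximum`/`minimum` as in the body above; the expected values follow the behavior demonstrated in Example #3's docstring (slope on the negative branch, 1 on the positive branch, 0 at the tie x == 0). `mygrad` is assumed installed.

import numpy as np
import mygrad as mg

x = mg.Tensor([-2.0, -1.0, 0.0, 1.0, 2.0])
y = mg.maximum(x, 0) + 0.1 * mg.minimum(x, 0)   # leaky_relu(x, slope=0.1)
y.backward()
assert np.allclose(x.grad, [0.1, 0.1, 0.0, 1.0, 1.0])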
Example #5
def hard_tanh(x, *, lower_bound=-1, upper_bound=1):
    ''' Returns the hard hyperbolic tangent function elementwise along `x`.

    The hard_tanh function is `lower_bound` where `x` <= `lower_bound`, `upper_bound` where 
    `x` >= `upper_bound`, and `x` where `lower_bound` < `x` < `upper_bound`.

    Parameters
    ----------
    x : Union[numpy.ndarray, mygrad.Tensor]
        The input to which the hard tanh function is applied.

    lower_bound : Real, optional (default=-1)
        The lower bound on the hard tanh.

    upper_bound : Real, optional (default=1)
        The upper bound on the hard tanh.

    Returns
    -------
    mygrad.Tensor
        The result of applying the hard tanh function elementwise to `x`.
    '''
    return maximum(lower_bound, minimum(x, upper_bound))
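An illustrative use of the clamp defined above, assuming `mygrad` supplies `maximum`/`minimum` as in the other examples here; with no ties present, gradient flows only through the un-clipped entries.

import numpy as np
import mygrad as mg

x = mg.Tensor([-3.0, -0.5, 0.0, 0.5, 3.0])
out = mg.maximum(-1, mg.minimum(x, 1))          # hard_tanh with default bounds
assert np.allclose(out.data, [-1.0, -0.5, 0.0, 0.5, 1.0])

out.backward()
assert np.allclose(x.grad, [0.0, 1.0, 1.0, 1.0, 0.0])   # zero gradient where clipped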