Example #1
def test_maximum_bkwd(x, data):
    # draw a second operand whose shape broadcasts against `x`
    y = data.draw(hnp.arrays(shape=broadcastable_shape(x.shape, max_dim=5),
                             dtype=float,
                             elements=st.floats(-10., 10.)),
                  label="y")

    # avoid near-ties between x and y, where the derivative of maximum is ambiguous
    assume(not np.any(np.isclose(x, y)))

    x_arr = Tensor(np.copy(x))
    y_arr = Tensor(np.copy(y))
    o = maximum(x_arr, y_arr)

    grad = data.draw(hnp.arrays(shape=o.shape,
                                dtype=float,
                                elements=st.floats(1, 10),
                                unique=True),
                     label="grad")
    (o * grad).sum().backward()

    # numerically compute the gradients of np.maximum for comparison
    dx, dy = numerical_gradient_full(np.maximum,
                                     x,
                                     y,
                                     back_grad=grad,
                                     as_decimal=True)

    assert_allclose(x_arr.grad, dx)
    assert_allclose(y_arr.grad, dy)
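
As excerpted, the test relies on Hypothesis to supply `x` and `data`; in the originating test suite a `@given` decorator along these lines would precede the function (the exact strategies and bounds below are an assumption for illustration, not taken from the source):

@given(x=hnp.arrays(shape=hnp.array_shapes(max_dims=4),
                    dtype=float,
                    elements=st.floats(-10., 10.)),
       data=st.data())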
Example #2
def test_maximum_bkwd_equal():
    """ regression test for documented behavior of maximum/minimum where
        x == y"""

    x = Tensor([1.0, 0.0, 2.0])
    y = Tensor([2.0, 0.0, 1.0])

    o = maximum(x, y)
    o.backward()

    assert_allclose(x.grad, [0.0, 0.0, 1])
    assert_allclose(y.grad, [1.0, 0.0, 0])
    o.null_gradients()
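
For reference, the same documented tie convention is described for `minimum`: wherever the two operands are equal, neither receives a gradient. A minimal sketch, assuming `minimum` is imported alongside `maximum` and follows the same convention as shown above:

x = Tensor([1.0, 0.0, 2.0])
y = Tensor([2.0, 0.0, 1.0])

o = minimum(x, y)
o.backward()

assert_allclose(x.grad, [1.0, 0.0, 0.0])  # tie at index 1 receives no gradient
assert_allclose(y.grad, [0.0, 0.0, 1.0])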
Example #3
def loss(margin, sg, sb):
    """

    :param margin: float
        the margin desired between good embeddings and bad embeddings
    :param sg:
        a good image embedding for a caption
    :param sb:
        a bad image embedding for a caption
    :return:
        the loss
    """
    return mg.maximum(0, margin - (sg - sb))
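
A minimal usage sketch with made-up numbers (the values, sizes, and the scalar margin below are assumptions for illustration, not from the source):

import mygrad as mg

margin = 0.5
sg = mg.Tensor([0.9, 0.2])   # hypothetical "good" embedding scores
sb = mg.Tensor([0.3, 0.6])   # hypothetical "bad" embedding scores

l = loss(margin, sg, sb)     # maximum(0, 0.5 - [0.6, -0.4]) -> Tensor([0. , 0.9])
l.backward()
print(sg.grad)               # array([ 0., -1.])  only the violating entry back-propagates
print(sb.grad)               # array([0., 1.])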
Example #4
def leaky_relu(x, slope, constant=False):
    """ Returns the leaky rectified linear activation elementwise along x.

    The leaky ReLU is given by `max(x, 0) + slope*min(x, 0)`.

    Parameters
    ----------
    x : mygrad.Tensor
        Input data.

    slope : Union[Real, mygrad.Tensor]
        The slope of the negative activation.

    constant : boolean, optional (default=False)
        If ``True``, the returned tensor is a constant (it
        does not back-propagate a gradient).

    Returns
    -------
    mygrad.Tensor
        The result of applying the "leaky relu" function elementwise to `x`.

    Examples
    --------
    >>> import mygrad as mg
    >>> from mygrad.nnet.activations import leaky_relu
    >>> x = mg.arange(-5, 6)
    >>> x
    Tensor([-5, -4, -3, -2, -1,  0,  1,  2,  3,  4,  5])
    >>> y = leaky_relu(x, slope=0.1); y
    Tensor([-0.5, -0.4, -0.3, -0.2, -0.1,  0. ,  1. ,  2. ,  3. ,  4. ,  5. ])
    >>> y.backward()
    >>> x.grad
    array([0.1, 0.1, 0.1, 0.1, 0.1, 0. , 1. , 1. , 1. , 1. , 1. ])
    """
    if isinstance(slope, (ndarray, Tensor)):
        slope = slope.item()

    if not isinstance(slope, Real):
        raise TypeError(
            f"`slope` must be a real-valued scalar, got {slope} (type { type(slope)})"
        )

    return maximum(
        x, 0, constant=constant) + slope * minimum(x, 0, constant=constant)
Example #5
def leaky_relu(x, slope):
    ''' Returns the leaky rectified linear activation elementwise along x. The leaky ReLU is given
    by max(x, 0) + slope*min(x, 0).

    Parameters
    ----------
    x : mygrad.Tensor
        Input data.

    slope : Union[Real, mygrad.Tensor]
        The slope of the negative activation.

    Returns
    -------
    mygrad.Tensor
        The leaky-rectified `x` (elementwise max(x, 0) + slope*min(x, 0)).
    '''
    return maximum(x, 0) + slope * minimum(x, 0)
Example #6
def simple_loss(x1, x2, y, margin):
    """
    x1 : mygrad.Tensor, shape=(N, D)
    x2 : mygrad.Tensor, shape=(N, D)
    y : Union[int, numpy.ndarray], scalar or shape=(N,)
    margin : float

    Returns
    -------
    mygrad.Tensor, shape=()
    """
    y = np.asarray(y)
    if y.ndim:
        # `y` may be a length-1 array or hold one label per row of `x1`/`x2`
        assert y.size == 1 or len(y) == len(x1)
        if x1.ndim == 2:
            # reshape so that `y` broadcasts across the columns of `x1 - x2`
            y = y.reshape(-1, 1)

    return mg.mean(mg.maximum(0, margin - y * (x1 - x2)))
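
A minimal usage sketch with made-up numbers (the scores, labels, and margin below are assumptions for illustration, with D=1 and y taking values in {+1, -1}):

import numpy as np
import mygrad as mg

x1 = mg.Tensor([[0.9], [0.2], [0.4]])   # hypothetical per-pair scores
x2 = mg.Tensor([[0.1], [0.8], [0.3]])
y = np.array([1, -1, 1])                # +1: x1 should outrank x2; -1: the reverse

out = simple_loss(x1, x2, y, margin=0.5)
out.backward()
print(out)            # Tensor(~0.1333): only the third pair violates the margin
print(x1.grad.shape)  # (3, 1)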
Example #7
def hard_tanh(x, *, lower_bound=-1, upper_bound=1):
    ''' Returns the hard hyperbolic tangent function.

    The hard_tanh function is `lower_bound` where `x` <= `lower_bound`, `upper_bound` where 
    `x` >= `upper_bound`, and `x` where `lower_bound` < `x` < `upper_bound`.

    Parameters
    ----------
    x : Union[numpy.ndarray, mygrad.Tensor]
        The input, to which to apply the hard tanh function.

    lower_bound : Real, optional (default=-1)
        The lower bound on the hard tanh.

    upper_bound : Real, optional (default=1)
        The upper bound on the hard tanh.

    Returns
    -------
    mygrad.Tensor
        The result of applying the hard tanh function elementwise to `x`.
    '''
    return maximum(lower_bound, minimum(x, upper_bound))
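
A minimal usage sketch with the default bounds (the input values are an assumption for illustration, and mygrad is assumed to be imported as mg):

import mygrad as mg

x = mg.Tensor([-3.0, -0.5, 0.0, 0.5, 3.0])
y = hard_tanh(x)   # clips to [-1, 1]: Tensor([-1. , -0.5,  0. ,  0.5,  1. ])
y.backward()
print(x.grad)      # array([0., 1., 1., 1., 0.])  zero gradient where `x` was clipped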