Code Example #1
def mean_squared_loss(outputs, targets):
    """ Returns the mean squared error Σ(xᵢ - yᵢ)² over the data points. 

    Parameters
    ----------
    outputs : mygrad.Tensor, shape=(N, any)
        The model outputs, where `N` is the number of items.

    targets : mygrad.Tensor, shape=(N, any)
        The target values, where `N` is the number of items.

    Returns
    -------
    mygrad.Tensor, shape=()
        The mean squared error.

    Extended Description
    --------------------
    The mean squared error is given by

    .. math::
        \frac{1}{N}\sum\limits_{i=1}^{N}(x_i - y_i)^2

    where :math:`N` is the number of elements in `x` and `y`.
    """
    return mean((outputs - targets)**2)
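A minimal usage sketch (not part of the original snippet), assuming the bare `mean` above is mygrad's and that mygrad is installed; it shows the loss value and the gradient that back-propagation writes into the inputs:

import mygrad as mg
from mygrad import mean  # the bare `mean` used by mean_squared_loss above

outputs = mg.Tensor([1.0, 2.0, 3.0])
targets = mg.Tensor([0.0, 2.0, 4.0])

loss = mean_squared_loss(outputs, targets)  # (1 + 0 + 1) / 3 = 2/3
loss.backward()                             # d(loss)/d(outputs) = 2 * (outputs - targets) / N
print(loss.item())   # ~0.6667
print(outputs.grad)  # ~[ 0.6667,  0.    , -0.6667]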
Code Example #2
File: focal_loss.py Project: davidmascharka/MyNN
def focal_loss(scores, targets, *, alpha=1, gamma=0):
    """ Return the focal loss.

    Parameters
    ----------
    scores : mygrad.Tensor, shape=(N, C)
        The C class scores for each of the N pieces of data.

    targets : Sequence[int], shape=(N,)
        The correct class indices, in [0, C), for each datum.

    alpha : Real, optional (default=1)
        The ɑ weighting factor in the loss formulation.

    gamma : Real, optional (default=0)
        The ɣ focusing parameter. Note that for ɣ=0 and ɑ=1, this is cross-entropy loss.

    Returns
    -------
    mygrad.Tensor
        The average focal loss.

    Notes
    -----
    This function does not perform a softmax before computing the loss. If you need to take the
    softmax before computing the loss, see :class:`SoftmaxFocalLoss` instead.
    """
    if isinstance(targets, Tensor):
        targets = targets.data

    label_locs = (range(len(targets)), targets)
    pc = scores[label_locs]
    return -mean(alpha * (1 - pc + 1e-14)**gamma * log(pc))
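A hedged usage sketch (my addition): the docstring notes that no softmax is applied, so `scores` here are assumed to already be normalized probabilities, and the bare `Tensor`, `mean`, and `log` used above are assumed to come from mygrad. With ɣ=0 and ɑ=1 the result reduces to cross-entropy; larger ɣ down-weights well-classified examples:

import mygrad as mg
from mygrad import Tensor, mean, log  # names relied upon by focal_loss above

# already-normalized class probabilities for two data points
scores = mg.Tensor([[0.2, 0.7, 0.1],
                    [0.9, 0.05, 0.05]])
targets = [1, 0]  # correct class indices

ce = focal_loss(scores, targets, alpha=1, gamma=0)  # cross-entropy, ~0.231
fl = focal_loss(scores, targets, alpha=1, gamma=2)  # easy examples down-weighted, ~0.017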
Code Example #3
def l1_loss(outputs, targets):
    ''' Returns the L¹ loss Σ|xᵢ - yᵢ| averaged over the number of data points. 

    Parameters
    ----------
    outputs : mygrad.Tensor, shape=(N,)
        The predictions for each of the N pieces of data.

    targets : numpy.ndarray, shape=(N,)
        The correct value for each of the N pieces of data.

    Returns
    -------
    mygrad.Tensor, shape=()
        The average L¹ loss.

    Extended Description
    --------------------
    The L1 loss is given by
    
    .. math::
        \frac{1}{N}\sum\limits_{i=1}^{N}|x_i - y_i|

    where :math:`N` is the number of elements in `x` and `y`.
    '''
    return mean(abs(outputs - targets))
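A minimal sketch (my addition), assuming the bare `mean` and `abs` used above resolve to mygrad functions; here `mygrad.absolute` is imported under the name `abs` so that gradients flow through the absolute value:

import numpy as np
import mygrad as mg
from mygrad import mean
from mygrad import absolute as abs  # shadow the built-in so l1_loss differentiates through |.|

outputs = mg.Tensor([1.0, -2.0, 3.0])
targets = np.array([0.0, 0.0, 0.0])

loss = l1_loss(outputs, targets)  # (1 + 2 + 3) / 3 = 2.0
loss.backward()                   # gradient is sign(outputs - targets) / N
print(outputs.grad)               # ~[ 0.3333, -0.3333,  0.3333]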
Code Example #4
def _var(x, keepdims=False, axis=None, ddof=0):
    """Defines variance without using abs. Permits use of
    complex-step numerical derivative."""
    def mean(y, keepdims=False, axis=None, ddof=0):
        if isinstance(axis, int):
            axis = (axis, )
        N = y.size if axis is None else np.prod([y.shape[i] for i in axis])
        return y.sum(keepdims=keepdims, axis=axis) / (N - ddof)

    return mean(
        (x - x.mean(axis=axis, keepdims=True))**2,
        keepdims=keepdims,
        axis=axis,
        ddof=ddof,
    )
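A quick sanity check (my addition, plain NumPy only) that this helper agrees with numpy.var, including the ddof correction:

import numpy as np

x = np.random.rand(4, 5)
assert np.allclose(_var(x), np.var(x))
assert np.allclose(_var(x, axis=(1,), ddof=1), np.var(x, axis=1, ddof=1))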
Code Example #5
def _std(x, keepdims=False, axis=None, ddof=0):
    """Defines standard dev without using abs. Permits use of
    complex-step numerical derivative."""
    def mean(y, keepdims=False, axis=None, ddof=0):
        if isinstance(axis, int):
            axis = (axis, )
        N = y.size if axis is None else np.prod([y.shape[i] for i in axis])
        return y.sum(keepdims=keepdims, axis=axis) / (N - ddof)

    return np.sqrt(
        mean(
            (x - x.mean(axis=axis, keepdims=True))**2,
            keepdims=keepdims,
            axis=axis,
            ddof=ddof,
        ))
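Because neither helper calls abs, they remain analytic and can be fed complex inputs, which is what the docstrings mean by a complex-step numerical derivative: f'(x) is recovered from Im f(x + ih) / h with no subtractive cancellation. A hedged sketch using only NumPy:

import numpy as np

x = np.array([1.0, 2.0, 3.0])
h = 1e-20  # far below float precision; the complex step has no cancellation error

# derivative of std(x) with respect to x[0] via the complex step
xc = x.astype(complex)
xc[0] += 1j * h
approx = _std(xc).imag / h

# analytic derivative: (x[0] - mean(x)) / (N * std(x))
exact = (x[0] - x.mean()) / (x.size * x.std())
assert np.isclose(approx, exact)  # ~-0.40825 for this x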
Code Example #6
def l2loss(pred, actual):  # L2 loss function (mean square distance)
    """

    Parameters
    ----------
    pred: Union[mygrad.Tensor, numpy.ndarray]
        A tensor or numpy array containing the model's predicted values
    actual: Union[mygrad.Tensor, numpy.ndarray]
        A tensor or numpy array containing the actual values

    Returns
    -------
    mg.Tensor
        A tensor containing the mean square distance between the prediction and actual values.
    """
    return mg.mean(mg.square(pred - actual))
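This is functionally the same loss as mean_squared_loss in example #1; per its docstring it also accepts a plain NumPy array for either argument. A small, hedged sketch:

import numpy as np
import mygrad as mg

pred = mg.Tensor([0.5, 1.5, 2.5])   # model predictions (tensor)
actual = np.array([1.0, 1.0, 2.0])  # ground truth (plain ndarray is fine)

loss = l2loss(pred, actual)  # mean of (pred - actual)**2 = 0.25
loss.backward()
print(pred.grad)             # ~2 * (pred - actual) / N = [-0.3333, 0.3333, 0.3333]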
Code Example #7
def binary_cross_entropy(y_pred, y_truth):
    """ Calculates the binary cross entropy loss for a given set of predictions.
    
    Parameters
    ----------
    y_pred: mg.Tensor, shape=
        The Tensor of class scores output from the model
    
    y_truth: mg.Tensor, shape=
        A constant Tensor or a NumPy array that contains the truth values for each prediction
    
    Returns
    -------
    mg.Tensor, shape=()
        A zero-dimensional tensor that is the loss
    """
    return -mg.mean(y_truth * mg.log(y_pred + 1e-08) + (1 - y_truth) * mg.log(1 - y_pred + 1e-08))
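A hedged usage sketch (my addition): `y_pred` is assumed to hold probabilities in (0, 1), e.g. sigmoid outputs, and the 1e-08 terms guard against log(0):

import numpy as np
import mygrad as mg

y_pred = mg.Tensor([0.9, 0.2, 0.8])  # predicted probabilities (e.g. sigmoid outputs)
y_truth = np.array([1.0, 0.0, 1.0])  # binary labels

loss = binary_cross_entropy(y_pred, y_truth)  # -mean(log([0.9, 0.8, 0.8])) ~0.184
loss.backward()                               # gradients flow back into y_pred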
Code Example #8
File: test_batchnorm.py Project: flappyBug/MyGrad
def simple_batchnorm(x, gamma, beta, eps):
    axes = [i for i in range(x.ndim)]
    axes.pop(1)  # every axis except 1
    axes = tuple(axes)
    keepdims_shape = tuple(1 if n != 1 else d for n, d in enumerate(x.shape))

    mean = mg.mean(x, axis=axes, keepdims=True)
    var = mg.var(x, axis=axes, keepdims=True)
    norm = (x - mean) / mg.sqrt(var + eps)

    if gamma is not None:
        gamma = gamma.reshape(keepdims_shape)
        norm *= gamma

    if beta is not None:
        beta = beta.reshape(keepdims_shape)
        norm += beta
    return norm
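The file header suggests this is a reference implementation from MyGrad's test suite. A hedged sketch of what it computes: each channel (axis 1) of the batch is normalized over every other axis, so the per-channel mean and standard deviation of the output should be roughly 0 and 1:

import numpy as np
import mygrad as mg

x = mg.Tensor(np.random.rand(4, 3, 5))  # (N=4, C=3, extra dim)
gamma = mg.Tensor(np.ones(3))
beta = mg.Tensor(np.zeros(3))

out = simple_batchnorm(x, gamma, beta, eps=1e-8)
print(out.data.mean(axis=(0, 2)))  # ~[0, 0, 0]  per-channel means
print(out.data.std(axis=(0, 2)))   # ~[1, 1, 1]  per-channel standard deviations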
Code Example #9
def simple_loss(x1, x2, y, margin):
    """
    x1 : mygrad.Tensor, shape=(N, D)
    x2 : mygrad.Tensor, shape=(N, D)
    y : Union[int, numpy.ndarray], scalar or shape=(N,)
    margin : float

    Returns
    -------
    mygrad.Tensor, shape=()
    """
    y = np.asarray(y)
    if y.ndim:
        assert y.size == 1 or len(y) == len(x1)
        if x1.ndim == 2:
            y = y.reshape(-1, 1)

    return mg.mean(mg.maximum(0, margin - y * (x1 - x2)))
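This computes a margin ranking style loss: with y = +1, the score x1 should exceed x2 by at least `margin`, and any shortfall is penalized linearly. A small, hedged sketch:

import mygrad as mg

x1 = mg.Tensor([[2.0], [0.5]])  # shape-(N=2, D=1) scores that should rank higher
x2 = mg.Tensor([[1.0], [1.0]])  # scores that should rank lower
y = 1                           # +1: x1 should beat x2 by at least `margin`

loss = simple_loss(x1, x2, y, margin=0.5)
# per-pair terms: max(0, 0.5 - (2.0 - 1.0)) = 0 and max(0, 0.5 - (0.5 - 1.0)) = 1.0
print(loss.item())  # 0.5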
Code Example #10
File: kl_divergence.py Project: IanCoolidge0/MyNN
def kl_divergence(outputs, targets):
    ''' Returns the Kullback-Leibler divergence loss from the outputs to the targets.
    
    The KL-Divergence loss for a single sample is given by yᵢ⊙(log(yᵢ) - xᵢ)

    Parameters
    ----------
    outputs : mygrad.Tensor, shape=(N, any)
        The model outputs (expected to be log-probabilities) for each of the N pieces of data.

    targets : numpy.ndarray, shape=(N, any)
        The correct value for each datum.

    Returns
    -------
    mygrad.Tensor, shape=()
        The mean Kullback-Leibler divergence.
    '''
    return mean(targets * (log(targets) - outputs))
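A hedged usage sketch (my addition): matching the formula above, `outputs` is treated as log-probabilities and `targets` as probabilities, and the result is averaged over every element rather than summed per sample:

import numpy as np
import mygrad as mg
from mygrad import log, mean  # the bare names used by kl_divergence above

targets = np.array([[0.5, 0.5]])             # target distribution
outputs = mg.log(mg.Tensor([[0.25, 0.75]]))  # model log-probabilities

loss = kl_divergence(outputs, targets)
print(loss.item())  # ~0.0719 (the per-sample KL of ~0.1438 averaged over 2 elements)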
Code Example #11
def negative_log_likelihood(x, y_true, *, weights=None, constant=False):
    """ Returns the (weighted) negative log-likelihood loss between log-probabilities and y_true.

    Note that this does not compute a softmax, so you should input log-probabilities to this.
    See ``softmax_crossentropy`` if you need your loss to compute a softmax.

    Parameters
    ----------
    x : array_like, shape=(N, C)
        The C log-probabilities for each of the N pieces of data.

    y_true : array_like, shape=(N,)
        The correct class indices, in [0, C), for each datum.

    weights : array_like, shape=(C,), optional (default=None)
        The weighting factor to use on each class, or None.

    constant : bool, optional (default=False)
        If ``True``, the returned tensor is a constant (it
        does not back-propagate a gradient).

    Returns
    -------
    mygrad.Tensor, shape=()
        The average (weighted) negative log-likelihood loss.

    Examples
    --------
    >>> import mygrad as mg
    >>> from mygrad.nnet import negative_log_likelihood

    Let's take a simple case where N=1 and C=3. We'll thus make up classification
    scores for a single datum. Suppose the scores are identical for the three classes
    and that the true class is class-0, so that each class probability is 1/3 and
    each log-probability is log(1/3):

    >>> logprob = mg.log(1 / 3).item()
    >>> x = mg.Tensor([[logprob, logprob, logprob]])  # a shape-(1, 3) tensor of log-probabilities
    >>> y_true = mg.Tensor([0])  # the correct class for this datum is class-0
    >>> negative_log_likelihood(x, y_true)
    Tensor(1.09861229)

    Log-probabilities where the prediction is highly confident and correct:
    >>> x = mg.Tensor([[0, -20, -20]])
    >>> negative_log_likelihood(x, y_true)
    Tensor(0.)

    Adding a class-weighting:
    >>> x = mg.Tensor([[-4.6, -4.6, -0.02]])
    >>> weights = mg.Tensor([2, 1, 1])
    >>> negative_log_likelihood(x, y_true, weights=weights)
    Tensor(9.2)
    """
    if isinstance(y_true, Tensor):
        y_true = y_true.data
    check_loss_inputs(x, y_true)

    if weights is None:
        weights = np.ones(x.shape[1])

    weights = asarray(weights)

    if weights.ndim != 1 or weights.shape[0] != x.shape[1]:
        raise ValueError("`weights` must be a shape-(C,) array: \n"
                         f"\tExpected shape-({x.shape[1]},)\n"
                         f"\tGot shape-{weights.shape}")

    label_locs = (range(len(y_true)), y_true)
    factors = weights[y_true]
    return -mean(x[label_locs] * factors, constant=constant)
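Beyond the docstring examples, a brief, hedged check with N=2 (assuming the module's own imports such as `check_loss_inputs`, `asarray`, `mean`, and `Tensor` are in scope, as in the original file): the result is just the negative mean of the log-probabilities assigned to the true classes.

import numpy as np
import mygrad as mg

probs = np.array([[0.7, 0.2, 0.1],
                  [0.1, 0.1, 0.8]])
x = mg.log(mg.Tensor(probs))  # shape-(2, 3) log-probabilities
y_true = np.array([0, 2])     # true classes for the two data points

loss = negative_log_likelihood(x, y_true)
# equivalently: -np.mean(np.log([0.7, 0.8])) ~0.2899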