Exemplo n.º 1
0
def logsumexp(arr, axis=0):
    """Computes the sum of arr assuming arr is in the log domain.

    Returns log(sum(exp(arr))) while minimizing the possibility of
    over/underflow.

    Parameters
    ----------
    arr : array_like
        Input array of log-domain values.
    axis : int, default 0
        Axis along which the sum is taken.

    Returns
    -------
    ndarray or scalar
        log(sum(exp(arr))) reduced along ``axis``.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.utils.extmath import logsumexp
    >>> a = np.arange(10)
    >>> np.log(np.sum(np.exp(a)))  # doctest: +ELLIPSIS
    9.4586297444267...
    >>> logsumexp(a)  # doctest: +ELLIPSIS
    9.4586297444267...
    """
    # Delegate to scipy's numerically stable implementation (max-shift trick).
    return scipy_logsumexp(arr, axis)
Exemplo n.º 2
0
def logsumexp(arr, axis=0):
    """Computes the sum of arr assuming arr is in the log domain.

    Returns log(sum(exp(arr))) while minimizing the possibility of
    over/underflow.

    Parameters
    ----------
    arr : array_like
        Input array of log-domain values.
    axis : int, default 0
        Axis along which the sum is taken.

    Returns
    -------
    ndarray or scalar
        log(sum(exp(arr))) reduced along ``axis``.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.utils.extmath import logsumexp
    >>> a = np.arange(10)
    >>> np.log(np.sum(np.exp(a)))  # doctest: +ELLIPSIS
    9.4586297444267...
    >>> logsumexp(a)  # doctest: +ELLIPSIS
    9.4586297444267...
    """
    # Delegate to scipy's numerically stable implementation (max-shift trick).
    return scipy_logsumexp(arr, axis)
Exemplo n.º 3
0
def logsumexp(x, axis=None, keepdims=False):
    """Numerically stable evaluation of log(sum(exp(x))).

    Evaluating log(sum(exp(x))) directly can overflow when exp receives
    large inputs and underflow when log receives tiny ones; this routine
    forwards to scipy's stable implementation instead.

    # Arguments
        x: A tensor or variable.
        axis: An integer, the axis to reduce over (all axes when `None`).
        keepdims: A boolean. When `False` the reduced axis is dropped,
            lowering the rank by 1; when `True` it is retained with
            length 1.

    # Returns
        The reduced tensor.
    """
    reduced = scipy_logsumexp(x, axis=axis, keepdims=keepdims)
    return reduced
Exemplo n.º 4
0
def log_sum_exp(x, axis=None, keepdims=False):
    """
    Numerically stable evaluation of log(sum(exp(x))).

    Examples
    --------
    >>> x = np.arange(10)
    >>> np.log(np.sum(np.exp(x)))  #doctest: +ELLIPSIS
    9.4586297444267...
    >>> log_sum_exp(x)  #doctest: +ELLIPSIS
    9.4586297444267...
    >>> log_sum_exp(x + 1000.) - 1000.  #doctest: +ELLIPSIS
    9.4586297444267...
    >>> log_sum_exp(x - 1000.) + 1000.  #doctest: +ELLIPSIS
    9.4586297444267...
    >>> log_sum_exp(np.random.rand(10, 5), axis=1).shape
    (10,)
    >>> log_sum_exp(np.random.rand(10, 5), axis=1, keepdims=True).shape
    (10, 1)
    """
    # scipy already performs the max-shift trick; just forward the call.
    result = scipy_logsumexp(x, axis=axis, keepdims=keepdims)
    return result
Exemplo n.º 5
0
def get_union_bound_factor(n, d):
    """Compute the natural logarithm of the number of itemsets."""
    # log-sum-exp over log C(n, i) for i = 1..d yields the log of the
    # sum of binomial coefficients without leaving the log domain.
    log_counts = [log_binomial(n, i) for i in range(1, d + 1)]
    return scipy_logsumexp(log_counts)
Exemplo n.º 6
0
def get_union_bound_factor(n, d):
    """Compute the natural logarithm of the number of itemsets."""
    # log-sum-exp over log C(n, i) for i = 1..d yields the log of the
    # sum of binomial coefficients without leaving the log domain.
    log_counts = [log_binomial(n, i) for i in range(1, d + 1)]
    return scipy_logsumexp(log_counts)
Exemplo n.º 7
0
 def logsumexp(a):
     """Return scipy's log-sum-exp of *a* coerced to a plain Python float."""
     value = scipy_logsumexp(a)
     return float(value)
Exemplo n.º 8
0
def logsumexp(v):
    """
    Log-sum-exp wrapper that tolerates all-(-inf) inputs.

    scipy's implementation handles the ordinary case; when no entry of
    v exceeds -Infinity, -Infinity is returned directly and scipy is
    bypassed.
    """
    peak = max(v)
    if peak > -Infinity:
        return scipy_logsumexp(v)
    return -Infinity