Example #1
def dist(t1, t2):
    """
    Computes the Euclidean distance between two tensors. Generally faster than `tn.norm(t1-t2)`.

    :param t1: a :class:`Tensor` (or a PyTorch tensor)
    :param t2: a :class:`Tensor` (or a PyTorch tensor)

    :return: a scalar :math:`\ge 0`
    """

    t1, t2 = _process(t1, t2)
    if isinstance(t1, torch.Tensor) and isinstance(t2, torch.Tensor):
        return torch.dist(t1, t2)
    return torch.sqrt((tn.dot(t1, t1) + tn.dot(t2, t2) - 2 * tn.dot(t1, t2)).clamp(0))  # clamp the whole squared distance to guard against small negative round-off
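A minimal usage sketch (shapes and ranks below are arbitrary; `tn.rand` and `Tensor.torch()` are tntorch API):

import torch
import tntorch as tn

t1 = tn.rand([32, 32, 32], ranks_tt=3)  # random TT tensors
t2 = tn.rand([32, 32, 32], ranks_tt=3)
print(dist(t1, t2))
print(torch.norm(t1.torch() - t2.torch()))  # dense check: both values should agree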
Example #2
def relative_error(gt, approx):
    """
    Computes the relative error between two tensors (torch or tntorch).

    :param gt: a torch or tntorch tensor
    :param approx: a torch or tntorch tensor

    :return: a scalar :math:`\ge 0`
    """

    gt, approx = _process(gt, approx)
    if isinstance(gt, torch.Tensor) and isinstance(approx, torch.Tensor):
        return torch.dist(gt, approx) / torch.norm(gt)
    dotgt = tn.dot(gt, gt)
    return torch.sqrt((dotgt + tn.dot(approx, approx) - 2*tn.dot(gt, approx)).clamp(0)) / torch.sqrt(dotgt.clamp(0))
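For illustration, a quick check against a lossy approximation (assuming `tn.round_tt(t, rmax=...)` returns a rank-truncated copy, as in tntorch's compression utilities):

import tntorch as tn

gt = tn.rand([16, 16, 16], ranks_tt=4)
approx = tn.round_tt(gt, rmax=2)   # truncate all TT ranks down to 2
print(relative_error(gt, approx))  # grows as rmax shrinks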
Example #3
def dgsm(t, bounds, marginals=None):
    """
    Compute the derivative-based global sensitivity measure (DGSM) :math:`\nu` from [1], defined for the :math:`i`-th variable as:

    :math:`\nu_i := \int_{\Omega} \left(\frac{\partial f}{\partial x_i}\right)^2 \, d\pmb{x}`

    [1] "Derivative-Based Global Sensitivity Measures", by Kucherenko and Iooss (2016)

    :param t: input tensor
    :param bounds: a pair (or list of pairs) of reals, or None. The bounds for each variable
    :param marginals: a list of vectors. If None (default), uniform marginals will be used
    :return: a vector of size N
    """


    if marginals is None:
        marginals = [torch.ones(sh)/sh for sh in t.shape]
    assert all([len(marginals[n]) == t.shape[n] for n in range(t.dim())])
    cores = []
    for n in range(t.dim()):
        marg = marginals[n] / marginals[n].sum()  # normalize without mutating the caller's marginals
        cores.append(marg[None, :, None])
    pdf = tn.Tensor(cores)

    grad = tn.gradient(t, dim='all', bounds=bounds)

    result = torch.zeros(t.dim())
    for n in range(t.dim()):
        result[n] = tn.dot(grad[n]*pdf, grad[n])
    return result
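A hedged usage sketch: the DGSM of f(x, y) = x + 2y on the unit square, discretized on a 64x64 grid (assuming `tn.meshgrid` accepts a list of axis vectors, as in the tntorch tutorials):

import torch
import tntorch as tn

axis = torch.linspace(0, 1, 64)
x, y = tn.meshgrid([axis, axis])
t = x + 2 * y
print(dgsm(t, bounds=[(0, 1), (0, 1)]))  # approximately [1, 4]: the squared partials are 1 and 4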
Example #4
def active_subspace(t):
    """
    Compute the main variational directions of a tensor.

    Reference: P. Constantine et al. `"Discovering an Active Subspace in a Single-Diode Solar Cell Model" (2017) <https://arxiv.org/pdf/1406.7607.pdf>`_

    See also P. Constantine's `data set repository <https://github.com/paulcon/as-data-sets/blob/master/README.md>`_.

    :param t: input tensor
    :return: (eigvals, eigvecs): an array and a matrix, encoding the eigenpairs in descending order
    """

    grad = tn.gradient(t, dim='all')

    M = torch.zeros(t.dim(), t.dim())
    for i in range(t.dim()):
        for j in range(i, t.dim()):
            M[i, j] = tn.dot(grad[i], grad[j]) / t.size
            M[j, i] = M[i, j]

    w, v = torch.linalg.eigh(M)  # ascending eigenvalues; torch.symeig was removed from recent PyTorch
    idx = range(t.dim() - 1, -1, -1)
    w = w[idx]
    v = v[:, idx]
    return w, v
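An illustrative call on a random tensor (sizes are arbitrary; the leading eigenvectors span the dominant variational directions):

import tntorch as tn

t = tn.rand([32] * 4, ranks_tt=3)
eigvals, eigvecs = active_subspace(t)
print(eigvals)        # descending; fast decay suggests a low-dimensional active subspace
print(eigvecs[:, 0])  # the most active direction in input space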
Example #5
File: anova.py, Project: taozerui/tntorch
def sobol(t, mask, marginals=None, normalize=True):
    """
    Compute Sobol indices (as given by a certain mask) for a tensor and independently distributed input variables.

    Reference: R. Ballester-Ripoll, E. G. Paredes, and R. Pajarola: `"Sobol Tensor Trains for Global Sensitivity Analysis" (2017) <https://www.sciencedirect.com/science/article/pii/S0951832018303132?dgcid=rss_sd_all>`_

    :param t: an N-dimensional :class:`Tensor`
    :param mask: an N-dimensional mask
    :param marginals: a list of N vectors (will be normalized if not summing to 1). If None (default), uniform distributions are assumed for all variables
    :param normalize: whether to normalize indices by the total variance of the model (True by default)

    :return: a scalar :math:`\ge 0`
    """

    if marginals is None:
        marginals = [None] * t.dim()

    a = tn.anova_decomposition(t, marginals)
    a -= tn.Tensor([
        torch.cat((torch.ones(1, 1, 1), torch.zeros(1, sh - 1, 1)), dim=1)
        for sh in a.shape
    ]) * a[(0, ) * t.dim()]  # Set empty tuple to 0
    am = a.clone()
    for n in range(t.dim()):
        if marginals[n] is None:
            m = torch.ones([t.shape[n]])
        else:
            m = marginals[n]
        m = m / torch.sum(m)  # make sure each marginal sums to 1, without mutating the input
        if am.Us[n] is None:
            if am.cores[n].dim() == 3:
                am.cores[n][:, 1:, :] *= m[None, :, None]
            else:
                am.cores[n][1:, :] *= m[:, None]
        else:
            am.Us[n][1:, :] *= m[:, None]
    am_masked = tn.mask(am, mask)
    if am_masked.cores[-1].shape[-1] > 1:
        am_masked.cores.append(
            torch.eye(am_masked.cores[-1].shape[-1])[:, :, None])
        am_masked.Us.append(None)

    if normalize:
        return tn.dot(a, am_masked) / tn.dot(a, am)
    else:
        return tn.dot(a, am_masked)
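A usage sketch, assuming the mask-building helpers `tn.symbols` and `tn.only` from tntorch's sensitivity-analysis tutorials:

import tntorch as tn

t = tn.rand([32] * 3, ranks_tt=4)
x, y, z = tn.symbols(3)      # symbolic variables for assembling masks
print(sobol(t, tn.only(x)))  # first-order index of the first variable
print(sobol(t, x))           # total index: every ANOVA term involving the first variable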
Example #6
def normsq(t):
    """
    Computes the squared norm of a :class:`Tensor`.

    :param t: a :class:`Tensor`

    :return: a scalar :math:`\ge 0`
    """

    return tn.dot(t, t)
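As a sanity check, normsq(t) should match the squared Frobenius norm of the decompressed tensor (sizes below are arbitrary):

import torch
import tntorch as tn

t = tn.rand([16] * 3, ranks_tt=2)
print(normsq(t), torch.norm(t.torch()) ** 2)  # equal up to floating-point rounding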
Example #7
def var(t, marginals=None):
    """
    Computes the variance of a :class:`Tensor`.

    :param t: a :class:`Tensor`
    :param marginals: an optional list of vectors

    :return: a scalar :math:`\ge 0`
    """

    if marginals is not None:
        assert len(marginals) == t.dim()
        tcentered = t - tn.mean(t, marginals=marginals)
        pdf = tn.Tensor(
            [marg[None, :, None] / marg.sum() for marg in marginals])
        return tn.dot(tcentered * pdf, tcentered)

    return tn.normsq(t - tn.mean(t)) / t.numel()
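With no marginals given, this is the population variance of all grid values, so it should agree with a dense computation (illustrative sizes):

import torch
import tntorch as tn

t = tn.rand([64] * 3, ranks_tt=3)
print(var(t))
print(torch.var(t.torch(), unbiased=False))  # divide by N, not N - 1, to match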
Example #8
def active_subspace(t, bounds, marginals=None):
    """
    Compute the main variational directions of a tensor.

    Reference: P. Constantine et al. `"Discovering an Active Subspace in a Single-Diode Solar Cell Model" (2017) <https://arxiv.org/pdf/1406.7607.pdf>`_

    See also P. Constantine's `data set repository <https://github.com/paulcon/as-data-sets/blob/master/README.md>`_.

    :param t: input tensor
    :param bounds: a pair (or list of pairs) of reals, or None. The bounds for each variable
    :param marginals: a list of vectors. If None (default), uniform marginals will be used
    :return: (eigvals, eigvecs): an array and a matrix, encoding the eigenpairs in descending order
    """

    if t.batch:
        raise ValueError('Batched tensors are not supported.')

    if marginals is None:
        marginals = [torch.ones(sh)/sh for sh in t.shape]
    assert all([len(marginals[n]) == t.shape[n] for n in range(t.dim())])
    cores = []
    for n in range(t.dim()):
        marg = (marginals[n][:-1] + marginals[n][1:]) / 2
        marg /= marg.sum()
        marg = torch.cat([marg, torch.zeros(1)])
        cores.append(marg[None, :, None])
    pdf = tn.Tensor(cores)

    grad = tn.gradient(t, dim='all', bounds=bounds)

    M = torch.zeros(t.dim(), t.dim())
    for i in range(t.dim()):
        first = grad[i]*pdf
        for j in range(i, t.dim()):
            M[i, j] = tn.dot(first, grad[j])
            M[j, i] = M[i, j]

    w, v = torch.linalg.eigh(M)
    idx = range(t.dim() - 1, -1, -1)
    w = w[idx]
    v = v[:, idx]
    return w, v
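Usage mirrors Example #4, except that bounds (and optionally marginals) must describe each variable's domain; the values below are arbitrary:

import tntorch as tn

t = tn.rand([32] * 3, ranks_tt=3)
eigvals, eigvecs = active_subspace(t, bounds=[(0, 1)] * 3)
print(eigvals)  # descending eigenvalues of the PDF-weighted gradient covariance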
Example #9
    def dot(self, other, **kwargs):
        """
        See :func:`metrics.dot()`.
        """

        return tn.dot(self, other, **kwargs)
Example #10
def check():
    # t1 and t2 are tntorch tensors defined in the enclosing test's scope
    x1 = t1.torch()
    x2 = t2.torch()
    gt = torch.dot(x1.flatten(), x2.flatten())
    assert tn.relative_error(tn.dot(t1, t2), gt) <= 1e-7