Example #1
def partialset(t, order=1, mask=None, bounds=None):
    """
    Given a tensor, compute a tensor that stacks all its partial derivatives of the requested order(s), optionally restricted by a mask.

    :Examples:

    >>> t = tn.rand([10, 10, 10])  # A 3D tensor
    >>> x, y, z = tn.symbols(3)
    >>> partialset(t, 1, x)  # x
    >>> partialset(t, 2, x)  # xx, xy, xz
    >>> partialset(t, 2, tn.only(y | z))  # yy, yz, zz

    :param t: a :class:`Tensor`
    :param order: an int or list of ints. Default is 1
    :param mask: an optional mask to select only a subset of partials
    :param bounds: a list of pairs [lower bound, upper bound] specifying parameter ranges (used to compute derivative steps). If None (default), all steps will be 1

    :return: a :class:`Tensor`
    """

    if bounds is None:
        bounds = [[0, sh - 1] for sh in t.shape]
    if not hasattr(order, '__len__'):
        order = [order]

    max_order = max(order)

    def diff(core, n):
        # Zero padding slice that keeps the differentiated core at its original size
        if core.dim() == 3:
            pad = torch.zeros(core.shape[0], 1, core.shape[2])
        else:
            pad = torch.zeros(1, core.shape[1])
        if core.shape[-2] == 1:  # The spatial axis is the second-to-last one for both 2D and 3D cores
            return pad
        step = (bounds[n][1] - bounds[n][0]) / (core.shape[-2] - 1)
        # Forward finite differences along the spatial axis
        return torch.cat(((core[..., 1:, :] - core[..., :-1, :]) / step, pad),
                         dim=-2)

    cores = []
    idxs = []
    for n in range(t.dim()):
        if t.Us[n] is None:
            stack = [t.cores[n]]
        else:
            stack = [torch.einsum('ijk,aj->iak', t.cores[n], t.Us[n])]
        idx = torch.zeros([t.shape[n]])
        for o in range(1, max_order + 1):
            # Stack the o-th order finite difference and record its order in idx
            stack.append(diff(stack[-1], n))
            idx = torch.cat((idx, torch.ones(stack[-1].shape[-2]) * o))
        cores.append(torch.cat(stack, dim=-2))
        idxs.append(idx)
    d = tn.Tensor(cores, idxs=idxs)
    wm = tn.automata.weight_mask(t.dim(), order, nsymbols=max_order + 1)
    if mask is not None:
        wm = tn.mask(wm, mask)
    result = tn.mask(d, wm)
    result.idxs = idxs
    return result
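A minimal usage sketch, assuming this function is exposed at the top level as tn.partialset (tn.rand and tn.symbols appear as such in the docstring above):

import torch
import tntorch as tn

t = tn.rand([10, 10, 10])                  # random 3D tensor
x, y, z = tn.symbols(3)

d1 = tn.partialset(t, order=1)             # all first-order partials: x, y, z
d2x = tn.partialset(t, order=2, mask=x)    # second-order partials involving x: xx, xy, xz
print(d1.shape, d2x.shape)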
Example #2
def truncate_anova(t, mask, keepdim=False, marginals=None):
    """
    Given a tensor and a mask, return the function that results after deleting all ANOVA terms that do not satisfy the
    mask.

    :Example:

    >>> t = ...  # an ND tensor
    >>> x = tn.symbols(t.dim())[0]
    >>> t2 = tn.truncate_anova(t, mask=tn.only(x), keepdim=False)  # This tensor will depend on one variable only

    :param t: a :class:`Tensor`
    :param mask: a mask :class:`Tensor` selecting which ANOVA terms to keep
    :param keepdim: if True, all dummy dimensions will be preserved, otherwise they will disappear. Default is False
    :param marginals: see :func:`anova_decomposition()`

    :return: a :class:`Tensor`
    """

    t = tn.undo_anova_decomposition(tn.mask(tn.anova_decomposition(t, marginals=marginals), mask=mask))
    if not keepdim:
        N = t.dim()
        affecting = torch.sum(tn.accepted_inputs(mask).double(), dim=0)  # Per-variable count of accepted tuples involving it
        slices = [0 for n in range(N)]
        for i in np.where(affecting)[0]:
            slices[int(i)] = slice(None)
        t = t[slices]
    return t
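A short sketch of the keepdim behavior, using the same hypothetical 3D setup as in the docstring above:

import tntorch as tn

t = tn.rand([10, 10, 10])
x, y, z = tn.symbols(t.dim())
t2 = tn.truncate_anova(t, mask=tn.only(x), keepdim=False)  # keep only the f_x ANOVA term
print(t.dim(), t2.dim())  # 3 vs. 1: the dummy y and z dimensions are dropped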
Example #3
def mean_dimension(t, mask=None, marginals=None):
    """
    Computes the mean dimension of a given tensor with given marginal distributions. This quantity measures how well the
    represented function can be expressed as a sum of functions that each depend on few variables. For example, mean dimension 1 (the
    lowest possible value) means that it is a purely additive function: :math:`f(x_1, ..., x_N) = f_1(x_1) + ... + f_N(x_N)`.

    Assumption: the input variables :math:`x_n` are independently distributed.

    References:

    - R. E. Caflisch, W. J. Morokoff, and A. B. Owen: `"Valuation of Mortgage Backed Securities Using Brownian Bridges to Reduce Effective Dimension" (1997) <http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.36.3160>`_

    -  R. Ballester-Ripoll, E. G. Paredes, and R. Pajarola: `"Tensor Algorithms for Advanced Sensitivity Metrics" (2017) <https://epubs.siam.org/doi/10.1137/17M1160252>`_

    :param t: an N-dimensional :class:`Tensor`
    :param mask: an optional mask :class:`Tensor`; if given, the mean dimension is computed over the ANOVA terms accepted by the mask only
    :param marginals: a list of N vectors (will be normalized if not summing to 1). If None (default), uniform distributions are assumed for all variables

    :return: a scalar >= 1
    """

    if mask is None:
        return tn.sobol(t, tn.weight(t.dim()), marginals=marginals)
    else:
        return tn.sobol(
            t, tn.mask(tn.weight(t.dim()), mask),
            marginals=marginals) / tn.sobol(t, mask, marginals=marginals)
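A quick sanity-check sketch (the mean dimension of any tensor lies between 1 and N):

import tntorch as tn

t = tn.rand([32] * 4)                # random 4D tensor
print(tn.mean_dimension(t))          # a scalar between 1 and 4
x = tn.symbols(4)[0]
print(tn.mean_dimension(t, mask=x))  # mean order among the ANOVA terms that involve x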
Example #4
def only(t):
    """
    Forces all irrelevant symbols to be zero.

    :Example:

    >>> x, y = tn.symbols(2)
    >>> tn.sum(x)  # Result: 2 (x = True, y = False, and x = True, y = True)
    >>> tn.sum(tn.only(x))  # Result: 1 (x = True, y = False)

    :param t: a :math:`2^N` :class:`Tensor`

    :return: a masked :class:`Tensor`
    """

    return tn.mask(t, absence(t.dim(), irrelevant_symbols(t)))
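In sensitivity analysis this is what distinguishes a variance component from a total index (see sobol() in Example #5 below); a sketch:

import tntorch as tn

t = tn.rand([10, 10, 10])
x, y, z = tn.symbols(t.dim())
print(tn.sobol(t, x))           # total index of x: all ANOVA terms containing x
print(tn.sobol(t, tn.only(x)))  # variance component of x: the {x} term alone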
Example #5
def sobol(t, mask, marginals=None, normalize=True):
    """
    Compute Sobol indices (as given by a certain mask) for a tensor and independently distributed input variables.

    Reference: R. Ballester-Ripoll, E. G. Paredes, and R. Pajarola: `"Sobol Tensor Trains for Global Sensitivity Analysis" (2017) <https://www.sciencedirect.com/science/article/pii/S0951832018303132?dgcid=rss_sd_all>`_

    :param t: an N-dimensional :class:`Tensor`
    :param mask: an N-dimensional mask
    :param marginals: a list of N vectors (will be normalized if not summing to 1). If None (default), uniform distributions are assumed for all variables
    :param normalize: whether to normalize indices by the total variance of the model (True by default)

    :return: a scalar >= 0
    """

    if marginals is None:
        marginals = [None] * t.dim()

    a = tn.anova_decomposition(t, marginals)
    a -= tn.Tensor([
        torch.cat((torch.ones(1, 1, 1), torch.zeros(1, sh - 1, 1)), dim=1)
        for sh in a.shape
    ]) * a[(0, ) * t.dim()]  # Zero out the constant (empty-tuple) ANOVA term
    am = a.clone()
    for n in range(t.dim()):
        if marginals[n] is None:
            m = torch.ones([t.shape[n]])
        else:
            m = marginals[n]
        m = m / torch.sum(m)  # Normalize to sum 1, out of place so a caller-provided marginal is not mutated
        if am.Us[n] is None:
            if am.cores[n].dim() == 3:
                am.cores[n][:, 1:, :] *= m[None, :, None]
            else:
                am.cores[n][1:, :] *= m[:, None]
        else:
            am.Us[n][1:, :] *= m[:, None]
    am_masked = tn.mask(am, mask)
    if am_masked.cores[-1].shape[-1] > 1:
        am_masked.cores.append(
            torch.eye(am_masked.cores[-1].shape[-1])[:, :, None])
        am_masked.Us.append(None)

    if normalize:
        return tn.dot(a, am_masked) / tn.dot(a, am)
    else:
        return tn.dot(a, am_masked)
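Typical usage, sketched with the mask helpers shown above (the | composition follows the tn.only(y | z) pattern from Example #1):

import tntorch as tn

t = tn.rand([10, 10, 10])
x, y, z = tn.symbols(t.dim())
S_x = tn.sobol(t, tn.only(x))                   # first-order index: the {x} term alone
S_xy = tn.sobol(t, tn.only(x | y))              # closed index: all terms within {x, y}
V_x = tn.sobol(t, tn.only(x), normalize=False)  # raw variance contribution, not a fraction
print(S_x, S_xy, V_x)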
Example #6
def dimension_distribution(t, mask=None, order=None, marginals=None):
    """
    Computes the dimension distribution of an ND tensor.

    :param t: ND input :class:`Tensor`
    :param mask: an optional mask :class:`Tensor` to restrict to
    :param order: int; only contributions up to this order are computed. By default, all N orders are returned
    :param marginals: PMFs for input variables. By default, uniform distributions

    :return: a PyTorch vector containing N elements (or `order` elements, if `order` is given)
    """

    if order is None:
        order = t.dim()
    if mask is None:
        return tn.sobol(t, tn.weight_one_hot(t.dim(), order+1), marginals=marginals).torch()[1:]
    else:
        mask2 = tn.mask(tn.weight_one_hot(t.dim(), order+1), mask)
        return tn.sobol(t, mask2, marginals=marginals).torch()[1:] / tn.sobol(t, mask, marginals=marginals)
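A sketch connecting this to mean_dimension() above: the distribution's entries are nonnegative, sum to 1, and their mean should recover the mean dimension.

import torch
import tntorch as tn

t = tn.rand([10] * 4)
dist = tn.dimension_distribution(t)       # dist[k-1] = variance share of order-k terms
print(dist.sum())                         # the shares sum to 1
orders = torch.arange(1, t.dim() + 1).to(dist)
print((dist * orders).sum())              # equals tn.mean_dimension(t)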