Example #1
0
def test_tensors():
    """Cross-approximating the identity function on random tensors must
    reproduce them to near machine precision."""

    for _ in range(100):
        original = random_format([10] * 6)
        reconstruction = tn.cross(function=lambda x: x,
                                  tensors=original,
                                  ranks_tt=15,
                                  verbose=False)
        assert tn.relative_error(original, reconstruction) < 1e-6
Example #2
0
def test_tensors():
    """Identity cross-approximation must reproduce random tensors, and
    `tn.cross_forward` must faithfully replay a recorded `info` object."""

    for _ in range(100):
        source = random_format([10] * 6)
        approx = tn.cross(function=lambda x: x,
                          tensors=source,
                          ranks_tt=15,
                          verbose=False)
        assert tn.relative_error(source, approx) < 1e-6

    # Record cross-approximation bookkeeping, then replay it on the same tensor
    source = tn.rand([10] * 6, ranks_tt=10)
    _, info = tn.cross(function=lambda x: x,
                       tensors=[source],
                       ranks_tt=15,
                       verbose=False,
                       return_info=True)
    replayed = tn.cross_forward(info, function=lambda x: x, tensors=source)
    assert tn.relative_error(source, replayed) < 1e-6
Example #3
0
File: ops.py  Project: rballester/tntorch
def reciprocal(t):
    """
    Compute the element-wise reciprocal of a tensor using cross-approximation
    (compare PyTorch's `reciprocal()`).

    :param t: input :class:`Tensor`

    :return: a :class:`Tensor`
    """

    # torch.reciprocal is already a one-argument callable; no lambda wrapper needed
    return tn.cross(torch.reciprocal, tensors=t, verbose=False)
Example #4
0
File: ops.py  Project: rballester/tntorch
def asin(t):
    """
    Compute the element-wise arcsine of a tensor using cross-approximation
    (compare PyTorch's `asin()`).

    :param t: input :class:`Tensor`

    :return: a :class:`Tensor`
    """

    # torch.asin is already a one-argument callable; no lambda wrapper needed
    return tn.cross(torch.asin, tensors=t, verbose=False)
Example #5
0
File: ops.py  Project: rballester/tntorch
def sqrt(t):
    """
    Element-wise square root computed using cross-approximation; see PyTorch's `sqrt()`.

    :param t: input :class:`Tensor`

    :return: a :class:`Tensor`
    """

    return tn.cross(lambda x: torch.sqrt(x), tensors=t, verbose=False)
Example #6
0
File: ops.py  Project: rballester/tntorch
def log2(t):
    """
    Compute the element-wise base-2 logarithm of a tensor using
    cross-approximation (compare PyTorch's `log2()`).

    :param t: input :class:`Tensor`

    :return: a :class:`Tensor`
    """

    # torch.log2 is already a one-argument callable; no lambda wrapper needed
    return tn.cross(torch.log2, tensors=t, verbose=False)
Example #7
0
File: ops.py  Project: rballester/tntorch
def exp(t):
    """
    Compute the element-wise exponential of a tensor using cross-approximation
    (compare PyTorch's `exp()`).

    :param t: input :class:`Tensor`

    :return: a :class:`Tensor`
    """

    # torch.exp is already a one-argument callable; no lambda wrapper needed
    return tn.cross(torch.exp, tensors=t, verbose=False)
Example #8
0
File: ops.py  Project: rballester/tntorch
def erfinv(t):
    """
    Compute the element-wise inverse error function of a tensor using
    cross-approximation (compare PyTorch's `erfinv()`).

    :param t: input :class:`Tensor`

    :return: a :class:`Tensor`
    """

    # torch.erfinv is already a one-argument callable; no lambda wrapper needed
    return tn.cross(torch.erfinv, tensors=t, verbose=False)
Example #9
0
File: ops.py  Project: rballester/tntorch
def cosh(t):
    """
    Compute the element-wise hyperbolic cosine of a tensor using
    cross-approximation (compare PyTorch's `cosh()`).

    :param t: input :class:`Tensor`

    :return: a :class:`Tensor`
    """

    # torch.cosh is already a one-argument callable; no lambda wrapper needed
    return tn.cross(torch.cosh, tensors=t, verbose=False)
Example #10
0
File: ops.py  Project: rballester/tntorch
def mul(t1, t2):
    """
    Compute the element-wise product of two tensors using cross-approximation
    (compare PyTorch's `mul()`).

    :param t1: input :class:`Tensor`
    :param t2: input :class:`Tensor`

    :return: a :class:`Tensor`
    """

    # torch.mul is already a two-argument callable; no lambda wrapper needed
    return tn.cross(torch.mul, tensors=[t1, t2], verbose=False)
Example #11
0
def test_domain():
    """Cross-approximate 1 / (x1 + x2 + x3) on a 10x10x10 grid and compare
    against the exact tensor built with meshgrid."""

    def reciprocal_sum(Xs):
        return 1. / torch.sum(Xs, dim=1)

    axes = [torch.linspace(1, 10, 10) for _ in range(3)]
    approx = tn.cross(function=reciprocal_sum,
                      domain=axes,
                      ranks_tt=3,
                      function_arg='matrix')
    grids = torch.meshgrid(axes)
    exact = 1. / sum(grids)

    assert tn.relative_error(exact, approx) < 5e-2
Example #12
0
    def __init__(self, t, eps=1e-9, verbose=False):
        """
        Precompute four families of Sobol-style sensitivity indices for
        tensor `t` and store them as compressed tensors (`st`, `sst`,
        `cst`, `tst`).

        :param t: input :class:`Tensor`
        :param eps: tolerance used for the cross-approximation and final
            TT rounding (default 1e-9)
        :param verbose: verbosity flag forwarded to `tn.cross`
        """

        # if isinstance(t, tt.core.vector.vector):
        #     t = tn.Tensor([torch.Tensor(c) for c in tt.vector.to_list(t)])

        ###########################################
        # Precompute all 4 types of Sobol indices #
        ###########################################

        t = t  # NOTE(review): no-op assignment; looks removable — confirm
        N = t.dim()
        tsq = t.decompress_tucker_factors()
        # Prepend each core's mean slice, so spatial index 0 encodes
        # "variable averaged out" and indices 1.. the original ticks.
        for n in range(N):
            tsq.cores[n] = torch.cat(
                [torch.mean(tsq.cores[n], dim=1, keepdim=True), tsq.cores[n]],
                dim=1)
        # Square the tensor element-wise, staying in compressed form
        tsq = tn.cross(tensors=[tsq],
                       function=lambda x: x**2,
                       eps=eps,
                       verbose=verbose)

        st_cores = []
        for n in range(N):
            # Slot 0: squared-mean slice; slot 1: mean of squares minus it
            st_cores.append(
                torch.cat([
                    tsq.cores[n][:, :1, :],
                    torch.mean(tsq.cores[n][:, 1:, :], dim=1, keepdim=True) -
                    tsq.cores[n][:, :1, :]
                ],
                          dim=1))
        st = tn.Tensor(st_cores)
        # Total variance: sum over all index subsets minus the empty subset
        var = tn.sum(st) - st[(0, ) * N]
        self.st = tn.round_tt(st / var, eps=eps)
        self.st -= tn.none(N) * self.st[(0, ) *
                                        N]  # Set element 0, ..., 0 to zero
        # presumably superset ("closed") indices — verify naming against docs
        self.sst = tn.Tensor([
            torch.cat([c[:, :1, :] + c[:, 1:2, :], c[:, 1:2, :]], dim=1)
            for c in self.st.cores
        ])
        # presumably complement indices — verify naming against docs
        self.cst = tn.Tensor([
            torch.cat([c[:, :1, :], c[:, :1, :] + c[:, 1:2, :]], dim=1)
            for c in self.st.cores
        ])
        # presumably total indices — verify naming against docs
        self.tst = 1 - tn.Tensor([
            torch.cat([c[:, :1, :] + c[:, 1:2, :], c[:, :1, :]], dim=1)
            for c in self.st.cores
        ])
Example #13
0
def data_processing(function_local, axes):
    """
    Cross-approximate `function_local` on a uniform grid over `axes`,
    print an RMSE sanity check on random grid samples, and return the model.

    :param function_local: callable receiving a P x N matrix of grid points
        (`function_arg='matrix'` mode of `tn.cross`)
    :param axes: list of dicts, one per dimension, with at least a
        'domain' (lo, hi) pair and a 'name' entry
    :return: tuple (N, t, para): number of dimensions, the compressed
        tensor, and the list of parameter names
    """
    # # load the model and parameter space
    # model = tr.core.load('./models/' + case + '.npz')
    # cores = tt.vector.to_list(model)
    # cores = [torch.Tensor(i) for i in cores]
    # t = tn.Tensor(cores)
    #
    # with open('./models/' + case + '.json') as f:
    #     data = json.load(f)  # or json.loads(f.read())
    #     N = len(data)  # N = dimensions
    #     para = []  # parameter name
    #     for i in range(len(data)):
    #         temp = data[i]['name']
    #         para.append(temp)

    # ======= depend only on tntorch ======== %
    start2 = time.time()
    N = len(axes)

    # Discretize each axis into `tick_num` uniformly spaced ticks
    domains = [axes[n]['domain'] for n in range(N)]
    tick_num = 64
    domain = []
    for n in range(N):
        # NOTE(review): `linspace` presumably imported from torch at file top — confirm
        domain.append(linspace(domains[n][0], domains[n][1], tick_num))

    t = tn.cross(function=function_local,
                 domain=domain,
                 function_arg='matrix',
                 max_iter=10)

    # Monte-Carlo RMSE check: P random grid points, model vs. ground truth
    P = 10000
    x_indices = torch.cat(
        [torch.randint(0, t.shape[n], [P, 1]) for n in range(t.dim())], dim=1)
    x = torch.cat([domain[n][x_indices[:, n:n + 1]] for n in range(t.dim())],
                  dim=1)
    print(
        'RMSE:',
        torch.sqrt(torch.mean((function_local(x) - t[x_indices].torch())**2)))

    para = [axes[n]['name'] for n in range(N)]
    print('computing sobol tensor took only {:g}s'.format(time.time() -
                                                          start2))
    print(max(t.ranks_tt))

    return N, t, para
Example #14
0
 def __pow__(self, power):
     """Element-wise power `self ** power`, evaluated via cross-approximation."""
     exponent = tn.full_like(self, fill_value=power)
     return tn.cross(function=lambda base, e: base**e, tensors=[self, exponent], verbose=False)
Example #15
0
 def __rtruediv__(self, other):
     """Element-wise division `other / self`, evaluated via cross-approximation."""
     numerator = tn.full_like(self, fill_value=other)
     return tn.cross(function=lambda num, den: num / den, tensors=[numerator, self], verbose=False)
Example #16
0
 def __truediv__(self, other):
     """Element-wise division `self / other`, evaluated via cross-approximation."""
     return tn.cross(function=lambda num, den: num / den, tensors=[self, other], verbose=False)
Example #17
0
File: tools.py  Project: rballester/tntorch
def convolve(t1: tn.Tensor, t2: tn.Tensor, mode='full', **kwargs):
    """
    ND convolution of two compressed tensors, performed by multiplying
    them in the Fourier frequency domain with cross-approximation [1].

    [1] M. Rakhuba, I. Oseledets: "Fast multidimensional convolution in low-rank formats via cross approximation" (2014)

    :param t1: a `tn.Tensor`
    :param t2: a `tn.Tensor`
    :param mode: 'full' (default), 'same', or 'valid'. See `np.convolve`
    :param kwargs: to be passed to the cross-approximation
    :return: a `tn.Tensor`
    """

    N = t1.dim()
    assert N == t2.dim()

    t1 = t1.decompress_tucker_factors()
    t2 = t2.decompress_tucker_factors()

    # FFT length per mode for a linear (non-circular) convolution
    fft_sizes = [t1.shape[n] + t2.shape[n] - 1 for n in range(N)]
    f1 = tn.Tensor(
        [torch.fft.fft(t1.cores[n], n=fft_sizes[n], dim=1) for n in range(N)])
    f2 = tn.Tensor(
        [torch.fft.fft(t2.cores[n], n=fft_sizes[n], dim=1) for n in range(N)])

    # Split the complex product (a+bi)(c+di) into real and imaginary parts,
    # since the cross-approximation operates on real-valued functions.
    def real_part(x, y):
        return torch.real(x) * torch.real(y) - torch.imag(x) * torch.imag(y)

    def imag_part(x, y):
        return torch.imag(x) * torch.real(y) + torch.real(x) * torch.imag(y)

    prod_re = tn.cross(tensors=[f1, f2], function=real_part, **kwargs)
    prod_im = tn.cross(tensors=[f1, f2], function=imag_part, **kwargs)
    # Re-attach the imaginary unit to the last core before inverting
    prod_im.cores[-1] = prod_im.cores[-1] * 1j
    back_re = tn.Tensor(
        [torch.fft.ifft(prod_re.cores[n], dim=1) for n in range(N)])
    back_im = tn.Tensor(
        [torch.fft.ifft(prod_im.cores[n], dim=1) for n in range(N)])
    result = tn.cross(tensors=[back_re, back_im],
                      function=lambda x, y: torch.real(x) + torch.real(y),
                      **kwargs)

    # Crop the full-size result according to the requested mode
    if mode == 'same':
        for n in range(N):
            k = min(t1.shape[n], t2.shape[n])
            lo = k // 2
            hi = lo + max(t1.shape[n], t2.shape[n])
            result.cores[n] = result.cores[n][:, lo:hi, :]
    elif mode == 'valid':
        for n in range(N):
            k = min(t1.shape[n], t2.shape[n])
            result.cores[n] = result.cores[n][:, k - 1:-(k - 1), :]

    return result
Example #18
0
    def __init__(self, t, verbose=False):
        """
        Precompute directional sensitivity indices for tensor `t`: for
        every subset of variables, correlate the (centered) model with a
        centered linear "directional" function over that subset, and
        normalize so the largest index in absolute value is 1.

        :param t: input :class:`Tensor`; NOTE(review): centered in place
            (mean subtracted via `-=`) — confirm callers expect this
        :param verbose: verbosity flag forwarded to `tn.cross`
        """

        N = t.dim()
        self.N = N

        # Center the model (make it have mean 0)
        t -= tn.mean(t)

        # Compute the directional function: x1 + ... + xk for
        # every subset of variables
        cores = []
        for n in range(N):
            I = t.shape[n]
            # Each core doubles the spatial axis: first I slots = variable
            # excluded, last I slots = variable included (linear ramp)
            c = torch.eye(2)[:, None, :].repeat(1, 2 * I, 1)
            c[1, I:, 0] = torch.linspace(0, 1, I)
            cores.append(c)
        cores[0] = cores[0][1:2, ...]
        cores[N - 1] = cores[N - 1][..., 0:1]
        vecs = tn.Tensor(cores)

        # Center all directional functions (make them have mean 0)
        cores = []
        for n in range(N):
            I = t.shape[n]
            c1 = torch.mean(vecs.cores[n][:, :I, :], dim=1,
                            keepdim=True).repeat(1, I, 1)
            c2 = torch.mean(vecs.cores[n][:, I:, :], dim=1,
                            keepdim=True).repeat(1, I, 1)
            cores.append(torch.cat([c1, c2], dim=1))
        vecs_means = tn.Tensor(cores)
        vecs -= vecs_means

        # Compute the variance of all directional functions
        vecs_sq = vecs * vecs
        cores = []
        for n in range(N):
            I = t.shape[n]
            c1 = torch.mean(vecs_sq.cores[n][:, :I, :], dim=1, keepdim=True)
            c2 = torch.mean(vecs_sq.cores[n][:, I:, :], dim=1, keepdim=True)
            cores.append(torch.cat([c1, c2], dim=1))
        vecs_variance = tn.Tensor(cores)
        vecs_variance += tn.none(N)  # To avoid division by 0

        # Compute covariances between the model and all directional functions
        trep = tn.Tensor([c.repeat(1, 2, 1) for c in t.cores])
        covs = tn.cross(tensors=[trep, vecs],
                        function=lambda x, y: x * y,
                        verbose=verbose)
        for n in range(N):
            I = t.shape[n]
            # Average the product over each half of the doubled axis
            c1 = torch.mean(covs.cores[n][:, :I, :], dim=1, keepdim=True)
            c2 = torch.mean(covs.cores[n][:, I:, :], dim=1, keepdim=True)
            covs.cores[n] = torch.cat([c1, c2], dim=1)

        # Tensor containing all 2^N desired indices
        result = tn.cross(tensors=[covs, vecs_variance],
                          function=lambda x, y: x / torch.sqrt(y),
                          verbose=verbose)

        # Normalize result so that the largest index in absolute value is 1.
        # That should be useful for color coding
        result /= max(torch.abs(tn.minimum(result)),
                      torch.abs(tn.maximum(result)))

        self.result = result