Example #1
def test_tucker_cp_tensor():
    a = torch.rand(10, 5, 5, 5, 5)
    b = tn.Tensor(a, ranks_tucker=3, ranks_cp=4, batch=True)

    for i in range(len(a)):
        c = tn.Tensor(a[i], ranks_tucker=3, ranks_cp=4, batch=False)

        assert torch.norm(c.torch() - b.torch()[i]) < 1e1
Example #2
def test_tucker_tensor():
    a = torch.rand(10, 5, 5, 5, 5)
    b = tn.Tensor(a, ranks_tucker=3, batch=True)

    for i in range(len(a)):
        c = tn.Tensor(a[i], ranks_tucker=3, batch=False)

        for j, core in enumerate(c.cores):
            assert torch.allclose(core, b.cores[j][i, ...])

        assert torch.allclose(c.torch(), b.torch()[i])
Example #3
def test_cp_tensor():
    a = torch.rand(10, 5, 5, 5, 5)
    b = tn.Tensor(a, ranks_cp=3, batch=True)

    for i in range(len(a)):
        c = tn.Tensor(a[i], ranks_cp=3, batch=False)

        for j, core in enumerate(c.cores):
            assert torch.norm(core - b.cores[j][i, ...]) < 1e1

        assert torch.norm(c.torch() - b.torch()[i]) < 1e1
Example #4
def dgsm(t, bounds, marginals):
    """
    Compute the derivative-based global sensitivity measure :math:`\\nu` from [1], defined for each :math:`i`-th variable as:

    :math:`\\nu_i := \\int_{\\Omega} \\left(\\frac{\\partial f}{\\partial x_i}\\right)^2 \\, d\\pmb{x}`

    [1] "Derivative-Based Global Sensitivity Measures", by Kucherenko and Iooss (2016)

    :param t: input tensor
    :param bounds: a pair (or list of pairs) of reals, or None. The bounds for each variable
    :param marginals: a list of vectors. If None (default), uniform marginals will be used
    :return: a vector of size N
    """


    if marginals is None:
        marginals = [torch.ones(sh)/sh for sh in t.shape]
    assert all([len(marginals[n]) == t.shape[n] for n in range(t.dim())])
    cores = []
    for n in range(t.dim()):
        # marg = (marginals[n][:-1] + marginals[n][1:]) / 2
        marg = marginals[n]
        marg /= marg.sum()
        # marg = torch.cat([marg, torch.zeros(1)])
        cores.append(marg[None, :, None])
    pdf = tn.Tensor(cores)

    grad = tn.gradient(t, dim='all', bounds=bounds)

    result = torch.zeros(t.dim())
    for n in range(t.dim()):
        result[n] = tn.dot(grad[n]*pdf, grad[n])
    return result
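A minimal usage sketch (not part of the original snippet), assuming `import torch`, `import tntorch as tn`, and that the dgsm() defined above is in scope; the tn.Tensor(..., ranks_tt=...) construction follows the calls shown further below.
t = tn.Tensor(torch.rand(16, 16, 16), ranks_tt=4)    # low-rank surrogate of a 3D function
bounds = [[0, 1], [0, 1], [0, 1]]                    # one [lower, upper] pair per variable
nu = dgsm(t, bounds=bounds, marginals=None)          # None -> uniform marginals
print(nu)                                            # one sensitivity value per input variable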
Example #5
def gaussian(shape, sigma_factor=0.2):
    """
    Create a multivariate Gaussian that is axis-aligned (i.e. with diagonal covariance matrix).

    :param shape: list of ints
    :param sigma_factor: a real (or list of reals) encoding the ratio sigma / shape. Default is 0.2, i.e. one fifth along each dimension

    :return: a :class:`Tensor` that sums to 1
    """

    if hasattr(shape[0], '__len__'):
        shape = shape[0]
    N = len(shape)
    if not hasattr(sigma_factor, '__len__'):
        sigma_factor = [sigma_factor]*N

    cores = [torch.ones(1, 1, 1) for n in range(N)]
    Us = []
    for n in range(N):
        sigma = sigma_factor[n] * shape[n]
        if shape[n] == 1:
            x = torch.Tensor([0])
        else:
            x = torch.linspace(-shape[n] / 2, shape[n] / 2, shape[n])
        U = torch.exp(-x**2 / (2*sigma**2))
        U = U[:, None] / torch.sum(U)
        Us.append(U)
    return tn.Tensor(cores, Us)
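A hedged usage sketch (assumes `import torch` and `import tntorch as tn`, with the gaussian() above in scope):
g = gaussian([64, 64], sigma_factor=0.2)   # axis-aligned 2D Gaussian; sigma = 0.2 * 64 along each axis
print(tn.sum(g))                           # should be close to 1, since each factor is normalized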
Example #6
def transpose(t):
    """
    Reverses the dimension order of a tensor, e.g. :math:`I_1 \\times I_2 \\times I_3` becomes :math:`I_3 \\times I_2 \\times I_1`.

    :param t: input tensor

    :return: another :class:`Tensor`, indexed by dimensions in reverse order
    """

    cores = []
    Us = []
    idxs = []
    for n in range(t.dim() - 1, -1, -1):
        if t.cores[n].dim() == 3:
            cores.append(t.cores[n].permute(2, 1, 0))
        else:
            cores.append(t.cores[n])
        if t.Us[n] is None:
            Us.append(None)
        else:
            Us.append(t.Us[n].clone())
        try:
            idxs.append(t.idxs[n].clone())
        except Exception:
            idxs.append(None)
    return tn.Tensor(cores, Us, idxs)
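A hedged usage sketch (assumes `import torch` and `import tntorch as tn`, with transpose() in scope):
t = tn.Tensor(torch.rand(4, 5, 6), ranks_tt=3)
print(t.shape, transpose(t).shape)   # expected: (4, 5, 6) and (6, 5, 4)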
Example #7
def meshgrid(*axes, batch=False):
    """
    See NumPy's or PyTorch's `meshgrid()`.

    :param axes: a list of N ints or torch vectors

    :return: a list of N :class:`Tensor`, of N dimensions each
    """

    device = None
    if not hasattr(axes, '__len__'):
        axes = [axes]
    if hasattr(axes[0], '__len__'):
        axes = axes[0]
    if hasattr(axes[0], 'device'):
        device = axes[0].device
    axes = list(axes)
    N = len(axes)
    for n in range(N):
        if not hasattr(axes[n], '__len__'):
            axes[n] = torch.arange(axes[n], dtype=torch.get_default_dtype())

    tensors = []
    for n in range(N):
        cores = [torch.ones(1, len(ax), 1).to(device) for ax in axes]
        # Accept torch tensors as well as lists/NumPy arrays for each axis
        cores[n] = torch.as_tensor(axes[n]).to(torch.get_default_dtype())
        cores[n] = cores[n][None, :, None].to(device)
        tensors.append(tn.Tensor(cores, device=device, batch=batch))
    return tensors
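A hedged usage sketch (assumes `import torch` and `import tntorch as tn`; integer arguments are expanded with torch.arange, as in the code above):
X, Y = meshgrid(3, 4)          # two grid tensors over a 3 x 4 domain
print(X.shape, Y.shape)        # both (3, 4)
print(X.torch()[:, 0])         # expected: 0., 1., 2. (the coordinate along the first axis)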
Example #8
def mask(t, mask):
    """
    Masks a tensor. Basically an element-wise product, but this function makes sure slices are matched according to their "meaning" (as annotated by the tensor's `idx` field, if available)

    :param t: input :class:`Tensor`
    :param mask: a mask :class:`Tensor`

    :return: masked :class:`Tensor`
    """
    device = t.cores[0].device
    if not hasattr(t, 'idxs'):
        idxs = [np.arange(sh) for sh in t.shape]
    else:
        idxs = t.idxs
    cores = []
    Us = []
    for n in range(t.dim()):
        idx = np.array(idxs[n])
        idx[idx >= mask.shape[n]] = mask.shape[n] - 1  # Clamp
        if mask.Us[n] is None:
            cores.append(mask.cores[n][..., idx, :].to(device))
            Us.append(None)
        else:
            cores.append(mask.cores[n].to(device))
            Us.append(mask.Us[n][idx, :])
    mask = tn.Tensor(cores, Us, device=device)
    return t * mask
Example #9
def meshgrid(*axes):
    """
    See NumPy's or PyTorch's `meshgrid()`.

    :param axes: a list of N ints or torch vectors

    :return: a list of N :class:`Tensor`, of N dimensions each
    """

    if not hasattr(axes, '__len__'):
        axes = [axes]
    if hasattr(axes[0], '__len__'):
        axes = axes[0]
    axes = list(axes)
    N = len(axes)
    for n in range(N):
        if not hasattr(axes[n], '__len__'):
            axes[n] = torch.arange(axes[n], dtype=torch.get_default_dtype())

    tensors = []
    for n in range(N):
        cores = [torch.ones(1, len(ax), 1) for ax in axes]
        # Accept torch tensors as well as lists/NumPy arrays for each axis
        cores[n] = torch.as_tensor(axes[n]).to(torch.get_default_dtype())[None, :, None]
        tensors.append(tn.Tensor(cores))
    return tensors
Example #10
def partialset(t, order=1, mask=None, bounds=None):
    """
    Given a tensor, compute another one that contains all partial derivatives of certain order(s) and according to some optional mask.

    :Examples:

    >>> t = tn.rand([10, 10, 10])  # A 3D tensor
    >>> x, y, z = tn.symbols(3)
    >>> partialset(t, 1, x)  # x
    >>> partialset(t, 2, x)  # xx, xy, xz
    >>> partialset(t, 2, tn.only(y | z))  # yy, yz, zz

    :param t: a :class:`Tensor`
    :param order: an int or list of ints. Default is 1
    :param mask: an optional mask to select only a subset of partials
    :param bounds: a list of pairs [lower bound, upper bound] specifying parameter ranges (used to compute derivative steps). If None (default), all steps will be 1

    :return: a :class:`Tensor`
    """

    if bounds is None:
        bounds = [[0, sh - 1] for sh in t.shape]
    if not hasattr(order, '__len__'):
        order = [order]

    max_order = max(order)

    def diff(core, n):
        if core.dim() == 3:
            pad = torch.zeros(core.shape[0], 1, core.shape[2])
        else:
            pad = torch.zeros(1, core.shape[1])
        if core.shape[1] == 1:
            return pad
        step = (bounds[n][1] - bounds[n][0]) / (core.shape[-2] - 1)
        return torch.cat(((core[..., 1:, :] - core[..., :-1, :]) / step, pad),
                         dim=-2)

    cores = []
    idxs = []
    for n in range(t.dim()):
        if t.Us[n] is None:
            stack = [t.cores[n]]
        else:
            stack = [torch.einsum('ijk,aj->iak', (t.cores[n], t.Us[n]))]
        idx = torch.zeros([t.shape[n]])
        for o in range(1, max_order + 1):
            stack.append(diff(stack[-1], n))
            idx = torch.cat((idx, torch.ones(stack[-1].shape[-2]) * o))
            if o == max_order:
                break
        cores.append(torch.cat(stack, dim=-2))
        idxs.append(idx)
    d = tn.Tensor(cores, idxs=idxs)
    wm = tn.automata.weight_mask(t.dim(), order, nsymbols=max_order + 1)
    if mask is not None:
        wm = tn.mask(wm, mask)
    result = tn.mask(d, wm)
    result.idxs = idxs
    return result
Example #11
def anova_decomposition(t, marginals=None):
    """
    Compute an extended tensor that contains all terms of the ANOVA decomposition for a given tensor.

    Reference: R. Ballester-Ripoll, E. G. Paredes, and R. Pajarola: `"Sobol Tensor Trains for Global Sensitivity Analysis" (2017) <https://www.sciencedirect.com/science/article/pii/S0951832018303132?dgcid=rss_sd_all>`_

    :param t: ND input tensor
    :param marginals: list of N vectors, each containing the PMF for each variable (use None for uniform distributions)
    :return: a :class:`Tensor`
    """

    marginals = copy.deepcopy(marginals)
    if marginals is None:
        marginals = [None] * t.dim()
    for n in range(t.dim()):
        if marginals[n] is None:
            marginals[n] = torch.ones([t.shape[n]]) / float(t.shape[n])
    cores = [c.clone() for c in t.cores]
    Us = []
    idxs = []
    for n in range(t.dim()):
        if t.Us[n] is None:
            U = torch.eye(t.shape[n])
        else:
            U = t.Us[n]
        expected = torch.sum(U * (marginals[n][:, None] / torch.sum(marginals[n])), dim=0, keepdim=True)
        Us.append(torch.cat((expected, U-expected), dim=0))
        idxs.append([0] + [1]*t.shape[n])
    return tn.Tensor(cores, Us, idxs=idxs)
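A hedged usage sketch (assumes `import torch` and `import tntorch as tn`; the module defining anova_decomposition() also imports copy):
t = tn.Tensor(torch.rand(8, 8, 8), ranks_tt=3)
a = anova_decomposition(t)
print(t.shape, a.shape)   # each mode gains one extra slice holding the "expected value" term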
Example #12
def weight_one_hot(N, r=None, nsymbols=2):
    """
    Given a string with :math:`k` 1's, it produces a vector that represents :math:`k` in `one hot encoding <https://en.wikipedia.org/wiki/One-hot>`_

    :param N: number of dimensions
    :param r: length of the output one-hot vector (default is N+1)
    :param nsymbols: an int or list of N ints: how many symbols each dimension admits (default is 2)

    :return: a vector of length :math:`r` that is zero everywhere except its :math:`k`-th element, which is 1
    """

    if not hasattr(nsymbols, '__len__'):
        nsymbols = [nsymbols] * N
    assert len(nsymbols) == N
    if r is None:
        r = N + 1

    cores = []
    for n in range(N):
        core = torch.zeros([r, nsymbols[n], r])
        core[:, 0, :] = torch.eye(r)
        for s in range(1, nsymbols[n]):
            core[:, s, s:] = torch.eye(r)[:, :-s]
        cores.append(core)
    cores[0] = cores[0][0:1, :, :]
    return tn.Tensor(cores)
Example #13
    def decompress_tucker_factors(self, dim='all', _clone=True):
        """
        Decompresses this tensor along the Tucker factors only.

        :param dim: int, list, or 'all' (default)

        :return: a :class:`Tensor` in CP/TT format, without Tucker factors
        """

        if dim == 'all':
            dim = range(self.dim())
        if not hasattr(dim, '__len__'):
            dim = [dim]*self.dim()

        cores = []
        Us = []
        for n in range(self.dim()):
            if n in dim and self.Us[n] is not None:
                if self.cores[n].dim() == 2:
                    cores.append(torch.einsum('jk,aj->ak', (self.cores[n], self.Us[n])))
                else:
                    cores.append(torch.einsum('ijk,aj->iak', (self.cores[n], self.Us[n])))
                Us.append(None)
            else:
                if _clone:
                    cores.append(self.cores[n].clone())
                    if self.Us[n] is not None:
                        Us.append(self.Us[n].clone())
                    else:
                        Us.append(None)
                else:
                    cores.append(self.cores[n])
                    Us.append(self.Us[n])
        return tn.Tensor(cores, Us, idxs=self.idxs)
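A hedged usage sketch for the method above (assumes a Tucker-compressed tensor built as in Examples #1-#2):
t = tn.Tensor(torch.rand(6, 6, 6), ranks_tucker=3)
full = t.decompress_tucker_factors()
print([U is None for U in full.Us])          # all True: factors absorbed into the cores
print(torch.norm(full.torch() - t.torch()))  # expected ~0: same underlying tensor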
Example #14
def test_from_ndarray():

    for i in range(100):
        gt = np.random.rand(*np.random.randint(1, 8, np.random.randint(1, 6)))
        t = tn.Tensor(gt)
        reco = t.numpy()
        assert np.linalg.norm(gt - reco) / np.linalg.norm(gt) < 1e-7
Example #15
def test_accepted_inputs():

    for i in range(10):
        gt = tn.Tensor(torch.randint(0, 2, (1, 2, 3, 4)))
        idx = tn.automata.accepted_inputs(gt)
        assert len(idx) == round(tn.sum(gt).item())
        assert torch.norm(gt[idx].torch() - 1).item() <= 1e-7
Example #16
    def torch(self):
        """
        Decompress into a PyTorch 2D tensor

        :return: a 2D torch.tensor
        """

        cores = [
            c.reshape(-1, c.shape[1], self.input_dims[i] *
                      self.output_dims[i], c.shape[-1])
            if self.batch else c.reshape(c.shape[0], -1, c.shape[-1])
            for i, c in enumerate(self.cores)
        ]
        tensor = tn.Tensor(cores, batch=self.batch).torch()
        rows = torch.prod(self.input_dims)
        cols = torch.prod(self.output_dims)

        shape: List[int] = torch.tensor(
            list(zip(self.input_dims, self.output_dims))).flatten().tolist()
        if self.batch:
            tensor = tensor.reshape([-1] + shape)
            dims = list(range(1, 2 * self.d + 1))
            tensor = tensor.permute([0] + dims[1::2] + dims[2::2])
            return tensor.reshape(-1, rows, cols)
        else:
            tensor = tensor.reshape(shape)
            dims = list(range(2 * self.d))
            tensor = tensor.permute(dims[0::2] + dims[1::2])
            return tensor.reshape(rows, cols)
Example #17
    def __init__(self, t, eps=1e-9, verbose=False):

        # if isinstance(t, tt.core.vector.vector):
        #     t = tn.Tensor([torch.Tensor(c) for c in tt.vector.to_list(t)])

        ###########################################
        # Precompute all 4 types of Sobol indices #
        ###########################################

        N = t.dim()
        tsq = t.decompress_tucker_factors()
        for n in range(N):
            tsq.cores[n] = torch.cat(
                [torch.mean(tsq.cores[n], dim=1, keepdim=True), tsq.cores[n]],
                dim=1)
        tsq = tn.cross(tensors=[tsq],
                       function=lambda x: x**2,
                       eps=eps,
                       verbose=verbose)

        st_cores = []
        for n in range(N):
            st_cores.append(
                torch.cat([
                    tsq.cores[n][:, :1, :],
                    torch.mean(tsq.cores[n][:, 1:, :], dim=1, keepdim=True) -
                    tsq.cores[n][:, :1, :]
                ],
                          dim=1))
        st = tn.Tensor(st_cores)
        var = tn.sum(st) - st[(0, ) * N]
        self.st = tn.round_tt(st / var, eps=eps)
        self.st -= tn.none(N) * self.st[(0, ) * N]  # Set element (0, ..., 0) to zero
        self.sst = tn.Tensor([
            torch.cat([c[:, :1, :] + c[:, 1:2, :], c[:, 1:2, :]], dim=1)
            for c in self.st.cores
        ])
        self.cst = tn.Tensor([
            torch.cat([c[:, :1, :], c[:, :1, :] + c[:, 1:2, :]], dim=1)
            for c in self.st.cores
        ])
        self.tst = 1 - tn.Tensor([
            torch.cat([c[:, :1, :] + c[:, 1:2, :], c[:, :1, :]], dim=1)
            for c in self.st.cores
        ])
Example #18
    def __setitem__(self, key, value):  # TODO not fully working yet
        key = self._process_key(key)
        scalar = False
        if isinstance(value, np.ndarray):
            value = tn.Tensor(torch.Tensor(value))
        elif isinstance(value, torch.Tensor):
            if value.dim() == 0:
                value = value.item()
                scalar = True
                # value = value*torch.ones(self.shape)
            else:
                value = tn.Tensor(value)
        elif isinstance(value, tn.Tensor):
            pass
        else:  # It's a scalar
            scalar = True

        subtract_cores = []
        add_cores = []
        for i in range(len(key)):
            if not isinstance(key[i], slice) and not hasattr(key[i], '__len__'):
                key[i] = slice(key[i], key[i]+1)
            chunk = self.cores[i][..., key[i], :]
            subtract_core = torch.zeros_like(self.cores[i])
            subtract_core[..., key[i], :] += chunk
            subtract_cores.append(subtract_core)
            if scalar:
                if self.cores[i].dim() == 3:
                    add_core = torch.zeros(1, self.shape[i], 1)
                else:
                    add_core = torch.zeros(self.shape[i], 1)
                add_core[..., key[i], :] += 1
                if i == 0:
                    add_core *= value
            else:
                if chunk.shape[1] != value.shape[i]:
                    raise ValueError('{}-th dimension mismatch in tensor assignment: {} (lhs) != {} (rhs)'.format(i, chunk.shape[1], value.shape[i]))
                if self.cores[i].dim() == 3:
                    add_core = torch.zeros(value.cores[i].shape[0], self.shape[i], value.cores[i].shape[2])
                else:
                    add_core = torch.zeros(self.shape[i], value.cores[i].shape[1])
                add_core[..., key[i], :] += value.cores[i]
            add_cores.append(add_core)
        # print(tn.Tensor(subtract_cores), tn.Tensor(add_cores))
        result = self - tn.Tensor(subtract_cores) + tn.Tensor(add_cores)
        self.__init__(result.cores, result.Us, self.idxs)
Example #19
    def clone(self):
        """
        Creates a copy of this tensor (calls PyTorch's `clone()` on all internal tensor network nodes)

        :return: another compressed tensor
        """

        cores = [self.cores[n].clone() for n in range(self.dim())]
        Us = []
        for n in range(self.dim()):
            if self.Us[n] is None:
                Us.append(None)
            else:
                Us.append(self.Us[n].clone())
        if hasattr(self, 'idxs'):
            return tn.Tensor(cores, Us=Us, idxs=self.idxs)
        return tn.Tensor(cores, Us=Us)
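A hedged usage sketch showing that clone() yields an independent copy:
t = tn.Tensor(torch.rand(5, 5), ranks_tt=2)
t2 = t.clone()
t2.cores[0] *= 0                                       # modifying the copy...
print(torch.norm(t.torch()), torch.norm(t2.torch()))   # ...leaves the original untouched (second norm is 0)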
Example #20
def test_squeeze():

    for i in range(100):
        x = np.random.randint(1, 3, np.random.randint(2, 10))
        t = tn.Tensor(x)
        x = np.squeeze(x)
        t = tn.squeeze(t)
        assert np.array_equal(x.shape, t.shape)
Example #21
    def tucker_core(self):
        """
        If this is a Tucker-like tensor, returns its Tucker core as an explicit PyTorch tensor.

        If this tensor does not have Tucker factors, then it returns the full decompressed tensor.

        :return: a PyTorch tensor
        """

        return tn.Tensor(self.cores).torch()
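A hedged usage sketch (assumes a Tucker-compressed tensor built as in Example #2):
t = tn.Tensor(torch.rand(6, 6, 6), ranks_tucker=3)
print(t.tucker_core().shape)   # expected: torch.Size([3, 3, 3]), i.e. size 3 (the Tucker rank) along each mode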
Example #22
def true(N):
    """
    Create a formula (N-dimensional tensor) that is always true.

    :param N: an integer

    :return: a :math:`2^N` :class:`Tensor`
    """

    return tn.Tensor([torch.ones([1, 2, 1]) for n in range(N)])
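A hedged usage sketch: true(N) is the all-ones formula over N binary variables.
f = true(3)
print(f.shape)     # (2, 2, 2)
print(tn.sum(f))   # 8: all 2**3 possible inputs satisfy the formula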
Example #23
def logspace(*args, **kwargs):
    """
    Creates a 1D :class:`Tensor` with logarithmically spaced values (see PyTorch's `logspace`).

    :param args: positional arguments passed to PyTorch's `logspace` (start, end, steps, ...)
    :param kwargs: keyword arguments passed to PyTorch's `logspace`

    :return: a 1D :class:`Tensor`
    """

    return tn.Tensor([torch.logspace(*args, **kwargs)[None, :, None]])
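A hedged usage sketch, mirroring PyTorch's logspace arguments:
t = logspace(0, 2, steps=5)
print(t.torch())   # expected: 1, ~3.16, 10, ~31.6, 100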
Example #24
def arange(*args, **kwargs):
    """
    Creates a 1D :class:`Tensor` (see PyTorch's `arange`).

    :param args: positional arguments passed to PyTorch's `arange` (start, end, step, ...)
    :param kwargs: keyword arguments passed to PyTorch's `arange`

    :return: a 1D :class:`Tensor`
    """

    return tn.Tensor([torch.arange(*args, dtype=torch.get_default_dtype(), **kwargs)[None, :, None]])
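A hedged usage sketch, mirroring PyTorch's arange arguments:
t = arange(0, 10, 2)
print(t.torch())   # expected: 0., 2., 4., 6., 8.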
Example #25
def _create(function, *shape, ranks_tt=None, ranks_cp=None, ranks_tucker=None, requires_grad=False, device=None):
    if hasattr(shape[0], '__len__'):
        shape = shape[0]
    N = len(shape)
    if not hasattr(ranks_tucker, "__len__"):
        ranks_tucker = [ranks_tucker for n in range(len(shape))]
    corespatials = []
    for n in range(len(shape)):
        if ranks_tucker[n] is None:
            corespatials.append(shape[n])
        else:
            corespatials.append(ranks_tucker[n])
    if ranks_tt is None and ranks_cp is None:
        if ranks_tucker is None:
            raise ValueError('Specify at least one of: ranks_tt, ranks_cp, ranks_tucker')
        # We imitate a Tucker decomposition: we set full TT-ranks
        datashape = [corespatials[0], np.prod(corespatials) // corespatials[0]]
        ranks_tt = []
        for n in range(1, N):
            ranks_tt.append(min(datashape))
            datashape = [datashape[0] * corespatials[n], datashape[1] // corespatials[n]]
    if not hasattr(ranks_tt, "__len__"):
        ranks_tt = [ranks_tt]*(N-1)
    ranks_tt = [None] + list(ranks_tt) + [None]
    if not hasattr(ranks_cp, '__len__'):
        ranks_cp = [ranks_cp]*N
    coreranks = [r for r in ranks_tt]
    for n in range(N):
        if ranks_cp[n] is not None:
            if ranks_tt[n] is not None or ranks_tt[n+1] is not None:
                raise ValueError('The ranks_tt and ranks_cp provided are incompatible')
            coreranks[n] = ranks_cp[n]
            coreranks[n+1] = ranks_cp[n]
    assert len(coreranks) == N+1
    if coreranks[0] is None:
        coreranks[0] = 1
    if coreranks[-1] is None:
        coreranks[-1] = 1
    if coreranks.count(None) > 0:
        raise ValueError('One or more TT/CP ranks were not specified')
    assert len(ranks_tucker) == N

    cores = []
    Us = []
    for n in range(len(shape)):
        if ranks_tucker[n] is None:
            Us.append(None)
        else:
            Us.append(function([shape[n], ranks_tucker[n]], requires_grad=requires_grad, device=device))
        if ranks_cp[n] is None:
            cores.append(function([coreranks[n], corespatials[n], coreranks[n+1]], requires_grad=requires_grad, device=device))
        else:
            cores.append(function([corespatials[n], ranks_cp[n]], requires_grad=requires_grad, device=device))
    return tn.Tensor(cores, Us=Us)
Example #26
    def __init__(self, lstm_cell_layer, ranks_tt=70):
        """LSTMCell class wrapper with tensor-trained weights."""
        super(TTLSTMCell, self).__init__()

        self.input_size = lstm_cell_layer.input_size
        self.hidden_size = lstm_cell_layer.hidden_size
        self.bias = lstm_cell_layer.bias

        self.weight_ih = ParameterList([
            Parameter(core) \
                for core in tn.Tensor(lstm_cell_layer.weight_ih.data.T,
                    ranks_tt=ranks_tt).cores
        ])
        self.weight_hh = ParameterList([
            Parameter(core) \
                for core in tn.Tensor(lstm_cell_layer.weight_hh.data.T,
                    ranks_tt=ranks_tt).cores
        ])
        if self.bias:
            self.bias_ih = Parameter(lstm_cell_layer.bias_ih.data)
            self.bias_hh = Parameter(lstm_cell_layer.bias_hh.data)
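A hedged construction sketch (assumes `import torch`, `import tntorch as tn`, and `from torch.nn import Parameter, ParameterList`; only the constructor is shown above, so a forward pass would rely on code not included here):
cell = torch.nn.LSTMCell(input_size=128, hidden_size=256)
tt_cell = TTLSTMCell(cell, ranks_tt=70)
print(sum(p.numel() for p in tt_cell.parameters()))   # typically far fewer weights than the dense cell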
Example #27
    def __init__(self, lstm_layer, ranks_tt=70):
        """LSTM class wrapper with tensor-trained weights."""
        super(TTLSTM, self).__init__()

        self.input_size = lstm_layer.input_size
        self.hidden_size = lstm_layer.hidden_size
        self.num_layers = lstm_layer.num_layers
        self.bias = lstm_layer.bias
        self.bidirectional = lstm_layer.bidirectional

        self.weight_ih = ParameterList([
            Parameter(core) \
                for core in tn.Tensor(lstm_layer.weight_ih_l0.data.T,
                    ranks_tt=ranks_tt).cores
        ])
        self.weight_hh = ParameterList([
            Parameter(core) \
                for core in tn.Tensor(lstm_layer.weight_hh_l0.data.T,
                    ranks_tt=ranks_tt).cores
        ])
        if self.bias:
            self.bias_ih = Parameter(lstm_layer.bias_ih_l0.data)
            self.bias_hh = Parameter(lstm_layer.bias_hh_l0.data)

        if self.bidirectional:
            self.weight_ih_reverse = ParameterList([
                Parameter(core) \
                    for core in tn.Tensor(lstm_layer.weight_ih_l0_reverse.data.T,
                        ranks_tt=ranks_tt).cores
            ])
            self.weight_hh_reverse = ParameterList([
                Parameter(core) \
                    for core in tn.Tensor(lstm_layer.weight_hh_l0_reverse.data.T,
                        ranks_tt=ranks_tt).cores
            ])
            if self.bias:
                self.bias_ih_reverse = Parameter(
                    lstm_layer.bias_ih_l0_reverse.data)
                self.bias_hh_reverse = Parameter(
                    lstm_layer.bias_hh_l0_reverse.data)
Example #28
def ttm(t, U, dim=None, transpose=False):
    """
    `Tensor-times-matrix (TTM) <https://epubs.siam.org/doi/pdf/10.1137/07070111X>`_ along one or several dimensions.

    :param t: input :class:`Tensor`
    :param U: one or several factors (vectors or matrices)
    :param dim: one or several dimensions. If None, the first len(U) dims are assumed
    :param transpose: if False (default) the contraction is performed
     along U's rows, else along its columns

    :return: transformed :class:`Tensor`
    """

    if not isinstance(U, (list, tuple)):
        U = [U]
    if dim is None:
        dim = range(len(U))
    if not hasattr(dim, '__len__'):
        dim = [dim]
    dim = list(dim)
    for i in range(len(dim)):
        if dim[i] < 0:
            dim[i] += t.dim()

    cores = []
    Us = []
    for n in range(t.dim()):
        if n in dim:
            if transpose:
                factor = U[dim.index(n)].t()
            else:
                factor = U[dim.index(n)]
            if factor.dim() == 1:
                factor = factor[None, :]
            if t.Us[n] is None:
                if t.cores[n].dim() == 3:
                    cores.append(
                        torch.einsum('iak,ja->ijk', (t.cores[n], factor)))
                else:
                    cores.append(
                        torch.einsum('ai,ja->ji', (t.cores[n], factor)))
                Us.append(None)
            else:
                cores.append(t.cores[n].clone())
                Us.append(torch.matmul(factor, t.Us[n]))
        else:
            cores.append(t.cores[n].clone())
            if t.Us[n] is None:
                Us.append(None)
            else:
                Us.append(t.Us[n].clone())
    return tn.Tensor(cores, Us=Us, idxs=t.idxs)
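A hedged usage sketch comparing ttm() against the equivalent dense product:
t = tn.Tensor(torch.rand(6, 7), ranks_tt=3)
U = torch.rand(4, 6)                          # contracts the first mode: 6 -> 4
s = ttm(t, U, dim=0)
print(s.shape)                                # expected: (4, 7)
print(torch.norm(s.torch() - U @ t.torch()))  # expected ~0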
Example #29
def eye(n, m=None, device=None, requires_grad=None):
    """
    Generates an identity matrix like PyTorch's `eye()`.

    :param n: number of rows
    :param m: number of columns (default is n)

    :return: a 2D :class:`Tensor`
    """

    if m is None:
        m = n
    c1 = torch.eye(n, m)
    c2 = torch.eye(m, m)
    return tn.Tensor([c1[None, :, :], c2[:, :, None]], device=device, requires_grad=requires_grad)
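A hedged sanity check for eye():
ident = eye(4, 4)
print(torch.norm(ident.torch() - torch.eye(4)))   # expected ~0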
Example #30
def hash(t):
    """
    Computes a scalar value that depends on the tensor entries (not on its internal compressed representation).

    We obtain it as :math:`\\langle T, W \\rangle`, where :math:`W` is a rank-1 tensor of weights selected at random (always the same seed).

    :return: a scalar
    """

    gen = torch.Generator()
    gen.manual_seed(0)
    cores = [torch.ones(1, 1, 1) for n in range(t.dim())]
    Us = [torch.rand([sh, 1], generator=gen) for sh in t.shape]
    w = tn.Tensor(cores, Us)
    return t.dot(w)
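A hedged usage sketch (the hash() defined above shadows Python's builtin of the same name): tensors with the same entries should hash to the same value, regardless of how they are compressed internally.
t1 = tn.Tensor(torch.rand(6, 6, 6), ranks_tt=4)
t2 = tn.round_tt(t1, eps=1e-12)     # re-compressed copy with (almost) identical entries
print(hash(t1), hash(t2))           # the two values should agree up to rounding error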