Code example #1
File: test_automata.py  Project: xx-fighting/tntorch
def test_accepted_inputs():

    for i in range(10):
        gt = tn.Tensor(torch.randint(0, 2, (1, 2, 3, 4)))
        idx = tn.automata.accepted_inputs(gt)
        assert len(idx) == round(tn.sum(gt).item())
        assert torch.norm(gt[idx].torch() - 1).item() <= 1e-7
Code example #2
File: automata.py  Project: rabusseau/tntorch
def accepted_inputs(t):
    """
    Returns all strings accepted by an automaton, in alphabetical order.

    Note: each string s will appear as many times as the value t[s]

    :param t: a :class:`Tensor`

    :return Xs: a Torch matrix, each row is one string
    """
    def recursion(Xs, left, rights, bound, mu):
        if mu == t.dim():
            return
        fiber = torch.einsum('ijk,k->ij', (t.cores[mu], rights[mu + 1]))
        per_point = torch.matmul(left, fiber).round()
        c = torch.cat((torch.Tensor([0]), per_point.cumsum(dim=0))).long()
        for i, p in enumerate(per_point):
            if c[i] == c[i + 1]:  # Unproductive prefix; don't recurse further
                continue
            Xs[bound + c[i]:bound + c[i + 1], mu] = i
            recursion(Xs, torch.matmul(left, t.cores[mu][:, i, :]), rights,
                      bound + c[i], mu + 1)

    Xs = torch.zeros([round(tn.sum(t).item()), t.dim()], dtype=torch.long)
    rights = [torch.ones(1)]  # Precomputed right-product chains
    for core in t.cores[::-1]:
        rights.append(torch.matmul(torch.sum(core, dim=1), rights[-1]))
    rights = rights[::-1]
    recursion(Xs, torch.ones(1), rights, 0, 0)
    return Xs
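
A quick usage sketch, mirroring the test in example #1 (shapes and values are illustrative): a 0/1 tensor acts as the automaton's indicator function, and accepted_inputs enumerates every index tuple with a nonzero value.

import torch
import tntorch as tn

# Each entry t[s] counts how many times string s is accepted
t = tn.Tensor(torch.randint(0, 2, (2, 3, 4)))
Xs = tn.automata.accepted_inputs(t)
assert len(Xs) == round(tn.sum(t).item())  # one row per accepted string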
Code example #3
File: logic.py  Project: rballester/tntorch
def is_satisfiable(t):
    """
    Checks if a formula can be satisfied.

    :param t: a :math:`2^N` :class:`Tensor`

    :return: True if `t` is satisfiable; False otherwise
    """

    return bool(tn.sum(t) >= 1e-6)  # small threshold absorbs numerical noise in the compressed tensor
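
For illustration, a minimal sketch assuming the formula is encoded as its truth table (the tensor construction follows example #1) and that is_satisfiable above is in scope:

import torch
import tntorch as tn

# Truth table of x0 AND x1: only the assignment (1, 1) is satisfying
table = torch.zeros(2, 2)
table[1, 1] = 1
t = tn.Tensor(table)
print(is_satisfiable(t))  # True, since tn.sum(t) == 1 >= 1e-6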
Code example #4
def mean(t, dim=None, keepdim=False):
    """
    Computes the mean of a :class:`Tensor` along all or some of its dimensions.

    :param t: a :class:`Tensor`
    :param dim: an int or list of ints (default: all)
    :param keepdim: whether to keep the same number of dimensions

    :return: a scalar (if keepdim is False and all dims were chosen) or :class:`Tensor` otherwise
    """

    return tn.sum(t, dim, keepdim, _normalize=True)
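
A brief usage sketch, assuming mean() above is exported as tn.mean: a full reduction returns a scalar, a partial one returns a Tensor.

import torch
import tntorch as tn

t = tn.Tensor(torch.arange(24, dtype=torch.float64).reshape(2, 3, 4))
full = tn.mean(t)            # scalar: average over all 24 entries (here 11.5)
partial = tn.mean(t, dim=1)  # a 2 x 4 Tensor, averaged along dimension 1 only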
Code example #5
def mean(t, dim=None, marginals=None, keepdim=False):
    """
    Computes the mean of a :class:`Tensor` along all or some of its dimensions.

    :param t: a :class:`Tensor`
    :param dim: an int or list of ints (default: all)
    :param marginals: an optional list of vectors
    :param keepdim: whether to keep the same number of dimensions

    :return: a scalar (if keepdim is False and all dims were chosen) or :class:`Tensor` otherwise
    """

    if marginals is not None:
        pdfcores = [torch.ones(sh) / sh for sh in t.shape]  # uniform marginal for every dimension by default
        if dim is None:
            dim = range(t.dim())
        for d, marg in zip(dim, marginals):
            pdfcores[d] = marg[None, :, None] / marg.sum()  # normalize the user-provided marginal
        pdf = tn.Tensor(pdfcores)
        return tn.sum(t * pdf, dim, keepdim)

    return tn.sum(t, dim, keepdim, _normalize=True)
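
A call sketch for the marginals variant. Note that dim must be iterable here, since it is zipped with marginals; the weight vector is normalized internally, so it need not sum to 1. The names below are illustrative.

import torch
import tntorch as tn

t = tn.Tensor(torch.rand(4, 5))
w = torch.tensor([1., 2., 3., 4.])      # unnormalized weights over dimension 0
m = tn.mean(t, dim=[0], marginals=[w])  # weighted average along dimension 0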
Code example #6
    def __init__(self, t, eps=1e-9, verbose=False):

        ###########################################
        # Precompute all 4 types of Sobol indices #
        ###########################################

        N = t.dim()
        tsq = t.decompress_tucker_factors()
        for n in range(N):
            tsq.cores[n] = torch.cat(
                [torch.mean(tsq.cores[n], dim=1, keepdim=True), tsq.cores[n]],
                dim=1)
        tsq = tn.cross(tensors=[tsq],
                       function=lambda x: x**2,
                       eps=eps,
                       verbose=verbose)

        st_cores = []
        for n in range(N):
            st_cores.append(torch.cat([
                tsq.cores[n][:, :1, :],
                torch.mean(tsq.cores[n][:, 1:, :], dim=1, keepdim=True) - tsq.cores[n][:, :1, :]
            ], dim=1))
        st = tn.Tensor(st_cores)
        var = tn.sum(st) - st[(0,) * N]
        self.st = tn.round_tt(st / var, eps=eps)
        self.st -= tn.none(N) * self.st[(0,) * N]  # Set element (0, ..., 0) to zero
        self.sst = tn.Tensor([
            torch.cat([c[:, :1, :] + c[:, 1:2, :], c[:, 1:2, :]], dim=1)
            for c in self.st.cores
        ])
        self.cst = tn.Tensor([
            torch.cat([c[:, :1, :], c[:, :1, :] + c[:, 1:2, :]], dim=1)
            for c in self.st.cores
        ])
        self.tst = 1 - tn.Tensor([
            torch.cat([c[:, :1, :] + c[:, 1:2, :], c[:, :1, :]], dim=1)
            for c in self.st.cores
        ])
Code example #7
File: tools.py  Project: rballester/tntorch
def sample(t, P=1, seed=None):
    """
    Generate P points (with replacement) from a joint PDF distribution represented by a tensor.

    The tensor does not have to sum 1 (will be handled in a normalized form).

    :param t: a :class:`Tensor`
    :param P: how many samples to draw (default: 1)

    :return Xs: an integer matrix of size :math:`P \\times N`
    """
    def from_matrix(M):
        """
        Treat each row of a matrix M as a PMF and select a column per row according to it
        """

        M = np.abs(M)
        M /= torch.sum(M, dim=1)[:, None]  # Normalize row-wise
        M = np.hstack([np.zeros([M.shape[0], 1]), M])
        M = np.cumsum(M, axis=1)
        thresh = rng.random(M.shape[0])
        M -= thresh[:, np.newaxis]
        shiftand = np.logical_and(M[:, :-1] <= 0,
                                  M[:, 1:] > 0)  # Find where the sign switches
        return np.where(shiftand)[1]

    rng = np.random.default_rng(seed=seed)
    N = t.dim()
    tsum = tn.sum(t, dim=np.arange(N),
                  keepdim=True).decompress_tucker_factors()
    Xs = torch.zeros([P, N])
    rights = [torch.ones(1)]
    for core in tsum.cores[::-1]:
        rights.append(torch.matmul(torch.sum(core, dim=1), rights[-1]))
    rights = rights[::-1]
    lefts = torch.ones([P, 1])
    t = t.decompress_tucker_factors()
    for mu in range(t.dim()):
        fiber = torch.einsum('ijk,k->ij', (t.cores[mu], rights[mu + 1]))
        per_point = torch.einsum('ij,jk->ik', (lefts, fiber))
        rows = from_matrix(per_point)
        Xs[:, mu] = torch.tensor(rows)
        lefts = torch.einsum('ij,jik->ik', (lefts, t.cores[mu][:, rows, :]))

    return Xs
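
A usage sketch, treating a random nonnegative tensor as an unnormalized joint PMF (assuming sample() above is exported as tn.sample):

import torch
import tntorch as tn

t = tn.Tensor(torch.rand(10, 10, 10))  # nonnegative entries; need not sum to 1
Xs = tn.sample(t, P=5, seed=0)         # a 5 x 3 matrix; each row is one sampled index (i, j, k)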
Code example #8
    def __getitem__(self, key):
        """
        NumPy-style indexing for compressed tensors. There are 5 accessors supported: slices, index arrays, integers,
        None, or another Tensor (selection via binary indexing)

        - Index arrays can be lists, tuples, or vectors
        - All index arrays must have the same length P
        - In NumPy, index arrays and slices can be interleaved. We do not admit that, as it requires expensive transpose operations

        """

        # Preprocessing
        if isinstance(key, Tensor):
            if torch.abs(tn.sum(key)-1) > 1e-8:
                raise ValueError("When indexing via a mask tensor, that mask should have exactly 1 accepting string")
            s = tn.accepted_inputs(key)[0]
            slicing = []
            for n in range(self.dim()):
                idx = self.idxs[n].long()
                idx[idx > 1] = 1
                idx = np.where(idx == s[n])[0]
                sl = slice(idx[0], idx[-1]+1)
                lenidx = len(idx)
                if lenidx == 1:
                    sl = idx.item()
                slicing.append(sl)
            return self[slicing]

        if isinstance(key, torch.Tensor):
            key = np.array(key, dtype=np.int64)  # np.int was removed in NumPy 1.24; use an explicit integer dtype
        if isinstance(key, np.ndarray) and key.ndim == 2:
            key = [key[:, col] for col in range(key.shape[1])]
        key = self._process_key(key)
        last_mode = None
        factors = {'int': None, 'index': None, 'index_done': False}
        cores = []
        Us = []
        counter = 0

        def join_cores(c1, c2):
            if c1.dim() == 1 and c2.dim() == 2:
                return torch.einsum('i,ai->ai', (c1, c2))
            elif c1.dim() == 2 and c2.dim() == 2:
                return torch.einsum('ij,aj->iaj', (c1, c2))
            elif c1.dim() == 1 and c2.dim() == 3:
                return torch.einsum('i,iaj->iaj', (c1, c2))
            elif c1.dim() == 2 and c2.dim() == 3:
                return torch.einsum('ij,jak->iak', (c1, c2))
            else:
                raise ValueError

        def insert_core(factors, core=None, key=None, U=None):
            if factors['index'] is not None:
                if factors['int'] is not None:
                    factors['index'] = join_cores(factors['int'], factors['index'])
                    factors['int'] = None
                cores.append(factors['index'])
                Us.append(None)
                factors['index'] = None
                factors['index_done'] = True
            if core is not None:
                if factors['int'] is not None:  # There is a previous 1D/2D core (CP/Tucker) from an integer slicing
                    if U is None:
                        cores.append(join_cores(factors['int'], core[..., key, :]))
                        Us.append(None)
                    else:
                        cores.append(join_cores(factors['int'], core))
                        Us.append(U[key, :])
                    factors['int'] = None
                else:  # Easiest case
                    if U is None:
                        cores.append(core[..., key, :])
                        Us.append(None)
                    else:
                        cores.append(core)
                        Us.append(U[key, :])

        def get_key(counter, key):
            if self.Us[counter] is None:
                return self.cores[counter][..., key, :]
            else:
                sl = self.Us[counter][key, :]
                if sl.dim() == 1:  # key is an int
                    if self.cores[counter].dim() == 3:
                        return torch.einsum('ijk,j->ik', (self.cores[counter], sl))
                    else:
                        return torch.einsum('ji,j->i', (self.cores[counter], sl))
                else:
                    if self.cores[counter].dim() == 3:
                        return torch.einsum('ijk,aj->iak', (self.cores[counter], sl))
                    else:
                        return torch.einsum('ji,aj->ai', (self.cores[counter], sl))

        for i in range(len(key)):
            if hasattr(key[i], '__len__'):
                this_mode = 'index'
            elif key[i] is None:
                this_mode = 'none'
            elif isinstance(key[i], (int, np.integer)):
                this_mode = 'int'
            elif isinstance(key[i], slice):
                this_mode = 'slice'
            else:
                raise IndexError

            if this_mode == 'none':
                insert_core(factors, torch.eye(self.ranks_tt[counter].item())[:, None, :], key=slice(None), U=None)
            elif this_mode == 'slice':
                insert_core(factors, self.cores[counter], key=key[i], U=self.Us[counter])
                counter += 1
            elif this_mode == 'index':
                if factors['index_done']:
                    raise IndexError("All index arrays must appear contiguously")
                if factors['index'] is None:
                    factors['index'] = get_key(counter, key[i])
                else:
                    if factors['index'].shape[-2] != len(key[i]):
                        raise ValueError("Index arrays must have the same length")
                    a1 = factors['index']
                    a2 = get_key(counter, key[i])
                    if a1.dim() == 2 and a2.dim() == 2:
                        factors['index'] = torch.einsum('ai,ai->ai', (a1, a2))
                    elif a1.dim() == 2 and a2.dim() == 3:
                        factors['index'] = torch.einsum('ai,iaj->iaj', (a1, a2))
                    elif a1.dim() == 3 and a2.dim() == 2:
                        factors['index'] = torch.einsum('iaj,aj->iaj', (a1, a2))
                    elif a1.dim() == 3 and a2.dim() == 3:
                        # Until https://github.com/pytorch/pytorch/issues/10661 is fully resolved  # TODO check efficiency for other cases
                        factors['index'] = torch.sum(a1[:, :, :, None]*a2.permute(1, 0, 2)[None, :, :, :], dim=2)
                        # factors['index'] = torch.einsum('iaj,jak->iak', (a1, a2))
                counter += 1
            elif this_mode == 'int':
                if last_mode == 'index':
                    insert_core(factors)
                if factors['int'] is None:
                    factors['int'] = get_key(counter, key[i])
                else:
                    c1 = factors['int']
                    c2 = get_key(counter, key[i])
                    if c1.dim() == 1 and c2.dim() == 1:
                        factors['int'] = torch.einsum('i,i->i', (c1, c2))
                    elif c1.dim() == 1 and c2.dim() == 2:
                        factors['int'] = torch.einsum('i,ij->ij', (c1, c2))
                    elif c1.dim() == 2 and c2.dim() == 1:
                        factors['int'] = torch.einsum('ij,j->ij', (c1, c2))
                    elif c1.dim() == 2 and c2.dim() == 2:
                        factors['int'] = torch.einsum('ij,jk->ik', (c1, c2))
                counter += 1
            last_mode = this_mode

        # At the end: handle possibly pending factors
        if last_mode == 'index':
            insert_core(factors, core=None, key=None, U=None)
        elif last_mode == 'int':
            if len(cores) > 0:  # We return a tensor: absorb existing cores with int factor
                if cores[-1].dim() == 2 and factors['int'].dim() == 1:
                    cores[-1] = torch.einsum('ai,i->ai', (cores[-1], factors['int']))
                elif cores[-1].dim() == 2 and factors['int'].dim() == 2:
                    cores[-1] = torch.einsum('ai,ij->iaj', (cores[-1], factors['int']))
                elif cores[-1].dim() == 3 and factors['int'].dim() == 1:
                    cores[-1] = torch.einsum('iaj,j->ai', (cores[-1], factors['int']))
                elif cores[-1].dim() == 3 and factors['int'].dim() == 2:
                    cores[-1] = torch.einsum('iaj,jk->iak', (cores[-1], factors['int']))
            else:  # We return a scalar
                if factors['int'].numel() > 1:
                    return torch.sum(factors['int'])
                return torch.squeeze(factors['int'])
        return tn.Tensor(cores, Us=Us)
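
A few illustrative accesses covering the supported accessor types (the tensor itself is arbitrary):

import torch
import tntorch as tn

t = tn.Tensor(torch.rand(4, 5, 6))
a = t[1, :, 2]                   # int + slice + int: a length-5 Tensor
b = t[[0, 1, 3], [2, 2, 4], 0]   # two equal-length index arrays (contiguous), then an int
c = t[None, :, :, :]             # None inserts a singleton dimension, as in NumPy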
Code example #9
    def sum(self, **kwargs):
        """
        See :func:`metrics.sum()`.
        """

        return tn.sum(self, **kwargs)