import tensorly as tl
from tensorly.kruskal_tensor import kruskal_to_tensor
from tensorly.testing import assert_array_equal


def test_kruskal_to_tensor_with_weights():
    A = tl.reshape(tl.arange(1, 5), (2, 2))
    B = tl.reshape(tl.arange(5, 9), (2, 2))
    weights = tl.tensor([2, -1], **tl.context(A))

    out = kruskal_to_tensor([A, B], weights=weights)
    expected = tl.tensor([[-2, -2], [6, 10]])  # computed by hand
    assert_array_equal(out, expected)
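The `expected` tensor can be checked directly from the definition of a weighted Kruskal tensor: each pair of factor columns contributes one weighted outer product. A minimal sketch of that hand computation (NumPy used here purely for illustration):

import numpy as np

A = np.arange(1, 5).reshape(2, 2)   # columns a1 = [1, 3], a2 = [2, 4]
B = np.arange(5, 9).reshape(2, 2)   # columns b1 = [5, 7], b2 = [6, 8]

# Weighted sum of rank-one terms: 2 * a1 b1^T + (-1) * a2 b2^T
expected = 2 * np.outer(A[:, 0], B[:, 0]) - np.outer(A[:, 1], B[:, 1])
print(expected)  # [[-2 -2]
                 #  [ 6 10]]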
Example #2
import tensorly as tl


def monotonicity_prox(tensor, decreasing=False):
    """
    Projects each column of the input tensor onto the set of monotone
    sequences, so that
          x[1] <= x[2] <= ... <= x[n] (decreasing=False)
                        or
          x[1] >= x[2] >= ... >= x[n] (decreasing=True)
    holds columnwise.

    Parameters
    ----------
    tensor : ndarray
    decreasing : bool
        If True, the returned tensor is columnwise monotone decreasing.
        Otherwise, it is columnwise monotone increasing.
        Default: False

    Returns
    -------
    ndarray
          A tensor whose columns are monotone.

    References
    ----------
    .. [1]: G. Chierchia, E. Chouzenoux, P. L. Combettes, and J.-C. Pesquet,
            "The Proximity Operator Repository. User's guide"
    """
    if tl.ndim(tensor) == 1:
        tensor = tl.reshape(tensor, [tl.shape(tensor)[0], 1])
    elif tl.ndim(tensor) > 2:
        raise ValueError(
            "Monotonicity prox doesn't support an input which has more than 2 dimensions."
        )
    tensor_mon = tl.copy(tensor)
    # Work on the increasing case; for decreasing, flip, project, flip back
    if decreasing:
        tensor_mon = tl.flip(tensor_mon, axis=0)
    row, column = tl.shape(tensor_mon)
    cum_sum = tl.cumsum(tensor_mon, axis=0)
    for j in range(column):
        # assisted_tensor[i, k] holds the mean of tensor_mon[i:k + 1, j],
        # computed from the cumulative sums
        assisted_tensor = tl.zeros([row, row])
        for i in range(row):
            if i == 0:
                assisted_tensor = tl.index_update(
                    assisted_tensor, tl.index[i, i:], cum_sum[i:, j] /
                    tl.tensor(tl.arange(row - i) + 1, **tl.context(tensor)))
            else:
                assisted_tensor = tl.index_update(
                    assisted_tensor, tl.index[i, i:],
                    (cum_sum[i:, j] - cum_sum[i - 1, j]) /
                    tl.tensor(tl.arange(row - i) + 1, **tl.context(tensor)))
        # Take the columnwise max of the segment means...
        tensor_mon = tl.index_update(tensor_mon, tl.index[:, j],
                                     tl.max(assisted_tensor, axis=0))
        # ...then a backward pass clips any remaining violations
        for i in reversed(range(row - 1)):
            if tensor_mon[i, j] > tensor_mon[i + 1, j]:
                tensor_mon = tl.index_update(tensor_mon, tl.index[i, j],
                                             tensor_mon[i + 1, j])
    if decreasing:
        tensor_mon = tl.flip(tensor_mon, axis=0)
    return tensor_mon
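A quick usage sketch, assuming the function above is in scope and a NumPy backend: the projection returns the closest columnwise monotone sequence in the least-squares sense (isotonic regression of each column).

import numpy as np
import tensorly as tl

tl.set_backend('numpy')

x = tl.tensor(np.array([[3.0], [1.0], [2.0]]))
print(monotonicity_prox(x))                   # [[2. ] [2. ] [2. ]]
print(monotonicity_prox(x, decreasing=True))  # [[3. ] [1.5] [1.5]]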
Example #3
    def _apply_tensor_dropout(self, cp_tensor, training=True):
        if (not self.proba) or ((not training) and (not self.drop_test)):
            return cp_tensor

        rank = cp_tensor.rank
        device = cp_tensor.factors[0].device

        # Ranks at or below min_dim are left untouched; otherwise
        # `weights` and `factors` below would be undefined
        if rank <= self.min_dim:
            return cp_tensor

        # Keep each rank-one component with probability (1 - proba)
        sampled_indices = tl.arange(rank, device=device, dtype=torch.int64)
        sampled_indices = sampled_indices[torch.bernoulli(
            torch.ones(rank, device=device) * (1 - self.proba),
            out=torch.empty(rank, device=device, dtype=torch.bool))]
        # Guarantee at least min_values surviving components
        if len(sampled_indices) == 0:
            sampled_indices = torch.randint(0,
                                            rank,
                                            size=(self.min_values, ),
                                            device=device,
                                            dtype=torch.int64)

        factors = [
            factor[:, sampled_indices] for factor in cp_tensor.factors
        ]
        if training:
            # Inverted-dropout rescaling keeps the expectation unchanged
            weights = cp_tensor.weights[sampled_indices] / (1 - self.proba)
        else:
            weights = cp_tensor.weights[sampled_indices]

        return CPTensor(weights, factors)
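The 1/(1 - proba) factor is the usual inverted-dropout rescaling: it keeps the expected magnitude of the reconstruction unchanged when components are dropped. A standalone sketch of that property (names and values here are illustrative only):

import torch

torch.manual_seed(0)
proba, rank = 0.5, 10000
weights = torch.ones(rank)

# Keep each component with probability (1 - proba), rescale the survivors
keep = torch.bernoulli(torch.full((rank, ), 1 - proba)).bool()
rescaled_sum = weights[keep].sum() / (1 - proba)
print(weights.sum().item(), rescaled_sum.item())  # both close to 10000.0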
Example #4
    def _apply_tensor_dropout(self, tucker_tensor, training=True):
        if (not self.proba) or ((not training) and (not self.drop_test)):
            return tucker_tensor

        core, factors = tucker_tensor.core, tucker_tensor.factors
        tucker_rank = tucker_tensor.rank

        # Independently subsample the indices kept along each mode
        sampled_indices = []
        for rank in tucker_rank:
            idx = tl.arange(rank, device=core.device, dtype=torch.int64)
            if rank > self.min_dim:
                # Keep each index with probability (1 - proba)
                idx = idx[torch.bernoulli(
                    torch.ones(rank, device=core.device) * (1 - self.proba),
                    out=torch.empty(rank, device=core.device,
                                    dtype=torch.bool))]
                # Guarantee at least min_values surviving indices
                if len(idx) == 0:
                    idx = torch.randint(0,
                                        rank,
                                        size=(self.min_values, ),
                                        device=core.device,
                                        dtype=torch.int64)

            sampled_indices.append(idx)

        # Slice the core along every mode at once (meshgrid defaults to
        # 'ij' indexing); rescale by 1/(1 - proba) per mode while training
        if training:
            core = core[torch.meshgrid(*sampled_indices)] * (1 / (
                (1 - self.proba)**core.ndim))
        else:
            core = core[torch.meshgrid(*sampled_indices)]

        factors = [
            factor[:, idx] for (factor, idx) in zip(factors, sampled_indices)
        ]

        return TuckerTensor(core, factors)
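Indexing the core with a meshgrid of the per-mode index lists selects their Cartesian product, i.e. a dense sub-core. A small sketch of that indexing pattern (assuming PyTorch >= 1.10 for the explicit `indexing` argument; the code above relies on the same 'ij' behaviour, which is also the default):

import torch

core = torch.arange(24.).reshape(2, 3, 4)
idx = [torch.tensor([0, 1]), torch.tensor([0, 2]), torch.tensor([1, 3])]

# Selects the (2, 2, 2) sub-core at {0, 1} x {0, 2} x {1, 3}
sub = core[torch.meshgrid(*idx, indexing='ij')]
print(sub.shape)  # torch.Size([2, 2, 2])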
Example #5
import tensorly as tl
from tensorly.cp_tensor import cp_to_tensor
from tensorly.random import random_cp
from tensorly.testing import assert_array_equal, assert_array_almost_equal


def test_cp_to_tensor_with_weights():
    A = tl.reshape(tl.arange(1, 5), (2, 2))
    B = tl.reshape(tl.arange(5, 9), (2, 2))
    weights = tl.tensor([2, -1], **tl.context(A))

    out = cp_to_tensor((weights, [A, B]))
    expected = tl.tensor([[-2, -2], [6, 10]])  # computed by hand
    assert_array_equal(out, expected)

    (weights, factors) = random_cp((5, 5, 5), rank=5,
                                   normalise_factors=True, full=False)
    # Reference: the mode-0 unfolding equals A_0 diag(w) khatri_rao(A_1, A_2)^T
    true_res = tl.dot(tl.dot(factors[0], tl.diag(weights)),
                      tl.transpose(tl.tenalg.khatri_rao(factors[1:])))
    true_res = tl.fold(true_res, 0, (5, 5, 5))
    res = cp_to_tensor((weights, factors))
    assert_array_almost_equal(
        true_res, res,
        err_msg='weights incorrectly incorporated in cp_to_tensor')
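For two factors, the weighted CP reconstruction reduces to the matrix identity sum_r w_r a_r b_r^T = A diag(w) B^T, which is one way to verify the hand-computed `expected` above. A minimal sketch (NumPy backend assumed):

import tensorly as tl

tl.set_backend('numpy')

A = tl.reshape(tl.arange(1, 5), (2, 2))
B = tl.reshape(tl.arange(5, 9), (2, 2))
weights = tl.tensor([2, -1], **tl.context(A))

# Scaling the columns of A by the weights absorbs diag(w)
manual = tl.dot(A * weights, tl.transpose(B))
print(manual)  # [[-2 -2]
               #  [ 6 10]]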
Example #6
    def _apply_tensor_dropout(self, tt_tensor, training=True):
        if (not self.proba) or ((not training) and (not self.drop_test)):
            return tt_tensor

        device = tt_tensor.factors[0].device

        # Subsample indices for each TT-rank (skipping the leading
        # boundary rank, which is always 1)
        sampled_indices = []
        for i, rank in enumerate(tt_tensor.rank[1:]):
            if rank > self.min_dim:
                idx = tl.arange(rank, device=device, dtype=torch.int64)
                # Keep each index with probability (1 - proba)
                idx = idx[torch.bernoulli(torch.ones(rank, device=device) *
                                          (1 - self.proba),
                                          out=torch.empty(rank,
                                                          device=device,
                                                          dtype=torch.bool))]
                # Guarantee at least min_values surviving indices
                if len(idx) == 0:
                    idx = torch.randint(0,
                                        rank,
                                        size=(self.min_values, ),
                                        device=device,
                                        dtype=torch.int64)
            else:
                # Rank too small to drop from: keep every index
                idx = tl.arange(rank, device=device,
                                dtype=torch.int64).tolist()

            sampled_indices.append(idx)

        # Inverted-dropout rescaling is applied during training only
        sampled_factors = []
        if training:
            scaling = 1 / (1 - self.proba)
        else:
            scaling = 1
        for i, f in enumerate(tt_tensor.factors):
            if i == 0:
                # First core: slice the outgoing rank only
                sampled_factors.append(f[..., sampled_indices[i]] * scaling)
            elif i == (tt_tensor.order - 1):
                # Last core: slice the incoming rank only, no rescaling
                sampled_factors.append(f[sampled_indices[i - 1], ...])
            else:
                # Middle cores: slice both ranks
                sampled_factors.append(
                    f[sampled_indices[i - 1], ...][..., sampled_indices[i]] *
                    scaling)

        return TTTensor(sampled_factors)
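Dropping an internal TT-rank index must slice both cores that share it; the sliced train then still contracts to a tensor of the original shape. A small standalone sketch in plain PyTorch:

import torch

# TT-cores of a (4, 5, 6) tensor with internal ranks (3, 2)
g1 = torch.randn(1, 4, 3)
g2 = torch.randn(3, 5, 2)
g3 = torch.randn(2, 6, 1)

# Drop index 1 of the first internal rank on both sides of the bond
keep = torch.tensor([0, 2])
g1s, g2s = g1[..., keep], g2[keep, ...]

# Contraction still yields the full (4, 5, 6) shape
full = torch.einsum('aib,bjc,ckd->ijk', g1s, g2s, g3)
print(full.shape)  # torch.Size([4, 5, 6])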
Example #7
import tensorly as tl
from math import prod  # Python >= 3.8

# _prod is TensorLy's fallback for math.prod; its import path is version-dependent
def test_prod():
    """Test for _prod (when math.prod is unavailable)"""
    assert _prod([1, 2, 3, 4]) == prod([1, 2, 3, 4])
    assert _prod([]) == 1  # the empty product is 1
    assert _prod(tl.arange(1, 5)) == _prod([1, 2, 3, 4])
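For reference, a minimal sketch of such a fallback; this is the standard functools.reduce idiom and not necessarily TensorLy's exact implementation:

from functools import reduce

def _prod(iterable):
    """Product of all elements; returns 1 for an empty iterable."""
    return reduce(lambda x, y: x * y, iterable, 1)

assert _prod([1, 2, 3, 4]) == 24
assert _prod([]) == 1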