Exemplo n.º 1
0
def trl(x, weight, bias=None, **kwargs):
    """Tensor Regression Layer

    Parameters
    ----------
    x : torch.tensor
        batch of inputs
    weight : FactorizedTensor
        factorized weights of the TRL
    bias : torch.Tensor, optional
        1D tensor, by default None

    Returns
    -------
    result
        input x contracted with regression weights
    """
    # Tucker-factorized weights have a specialized contraction path.
    if isinstance(weight, TuckerTensor):
        return tucker_trl(x, weight, bias=bias, **kwargs)

    # Generic path: reconstruct the full weight tensor and contract it with
    # every non-batch mode of x (the first mode of x is the batch dimension).
    result = tenalg.inner(x, weight.to_tensor(), n_modes=tl.ndim(x) - 1)
    return result if bias is None else result + bias
Exemplo n.º 2
0
def tucker_trl(x, weight, project_input=False, bias=None):
    """Tensor Regression Layer specialized for Tucker-factorized weights.

    Parameters
    ----------
    weight : Tucker-factorized tensor whose first ``tl.ndim(x) - 1`` factors
        correspond to the (non-batch) input modes of ``x`` and the remaining
        factors to the output modes.
    project_input : bool, optional
        If True, project ``x`` into the Tucker core's input space first and
        contract with a partially reconstructed weight, avoiding the full
        weight reconstruction.
    bias : optional 1D tensor added to the result, by default None.
    """
    n_input = tl.ndim(x) - 1
    if project_input:
        # Project each non-batch mode of x onto the corresponding input
        # factor (transposed), moving x into the core's input space.
        x = tenalg.multi_mode_dot(x,
                                  weight.factors[:n_input],
                                  modes=range(1, n_input + 1),
                                  transpose=True)
        # Only the output factors need to be contracted back onto the core.
        reg_weights = tenalg.multi_mode_dot(
            weight.core,
            weight.factors[n_input:],
            modes=range(n_input, weight.order))
    else:
        # No projection: reconstruct the full weight tensor.
        reg_weights = weight.to_tensor()

    result = tenalg.inner(x, reg_weights, n_modes=tl.ndim(x) - 1)
    return result if bias is None else result + bias
Exemplo n.º 3
0
# Collect the three delta tensors into one list for uniform iteration.
# NOTE(review): delta1/delta2/delta3 are defined elsewhere in the file --
# confirm their shapes match before relying on this ordering.
ALLDELTA = [delta1, delta2, delta3]


def nd_id(n, d):
    """Return the order-``d`` super-diagonal identity tensor of size ``n``.

    The result is an ``n x n x ... x n`` (``d`` times) array of zeros with
    ones on the super-diagonal, i.e. ``out[i, i, ..., i] == 1``.

    Parameters
    ----------
    n : int
        Size of each mode.
    d : int
        Number of modes (order of the tensor).

    Returns
    -------
    numpy.ndarray
        Array of shape ``(n,) * d`` with ones on the super-diagonal.
    """
    out = np.zeros((n, ) * d)
    # Use a *tuple* of index arrays for advanced indexing: indexing with a
    # list of arrays (the original `out[[np.arange(n)] * d]`) was deprecated
    # in NumPy 1.15 and raises an error in modern NumPy.
    out[(np.arange(n), ) * d] = 1
    return out


# ER: order-3 super-diagonal identity tensor of size 3.
ER = nd_id(3, 3)

#print("fdghkjdfhgkdj")
#identity tensor that i should find
#print(ER)

# Contract the first factor matrix with the identity tensor along mode 1.
# NOTE(review): `ten`, `ALLU`, `P`, `M`, `w`, `sum2`, `LA` and `Nprim` are
# defined elsewhere in the file -- confirm their shapes/values.
ans = ten.inner(ALLU[0], ER, 1)
print(ans)
print("jksfkjsdf")
# Accumulate the squared Frobenius-norm data-fit residual over P passes.
# NOTE(review): `ans` is NOT reset at the top of the outer loop, so each
# pass keeps contracting the previous result -- verify this is intended.
for i in range(0, P):
    for j in range(1, M):
        ans = ten.inner(ans, ALLU[j], j)
    b = w**P - ans
    sum2 = sum2 + LA.norm(b, 'fro')**2

# L1-regularization term, weighted per mode by Gamma.
sum3 = 0
Gamma = [2, 1, 3]
for n in range(0, M):
    sum3 = sum3 + Gamma[n] * LA.norm(ALLU[n], 1)
#print(LA.norm(b, 'fro'))
for m in range(1, M):
    for k in range(1, Nprim):
Exemplo n.º 4
0
 def forward(self, x):
     """Contract the input batch with the reconstructed Tucker regression
     weights over all non-batch modes and add the bias."""
     full_weights = tl.tucker_to_tensor((self.core, self.factors))
     contracted = inner(x, full_weights, n_modes=tl.ndim(x) - 1)
     return contracted + self.bias