Example #1
    def inverse(self):
        """
            fast inverse computation making use of matrix structure
        :return:
        """
        with torch.no_grad():
            # t0 = time.time()
            self.LU_decomposition()
            # t1 = time.time()
            submat_inv = self.tri_submatrix_inverse()
            # t2 = time.time()
            reci = 1.0 / \
                   (self.last_diag - torch.chain_matmul(self.last_vec.view(1, -1), submat_inv, self.last_vec.view(-1, 1)).item())
            inv_vec = -reci * torch.matmul(submat_inv, self.last_vec)

            ans = torch.zeros(self.n, self.n)
            ans[:-1, :-1] = submat_inv + \
                            reci * torch.chain_matmul(submat_inv, self.last_vec.view(-1, 1),
                                                      self.last_vec.view(1, -1), submat_inv)
            ans[-1, :-1] = inv_vec
            ans[:-1, -1] = inv_vec
            ans[-1, -1] = reci
            # t3 = time.time()

        # print(t1 - t0, '+', t2 - t1, '+', t3 - t2)

        self.inv = ans
        return ans
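The update above is the block-matrix (Schur complement) inversion formula for a symmetric matrix [[S, b], [b^T, d]]. Below is a minimal self-contained sketch of that identity, checked against torch.inverse on a random symmetric positive-definite matrix; block_inverse and the plain torch.inverse of the submatrix are illustrative stand-ins for the class's LU-based helpers.

import torch

def block_inverse(A: torch.Tensor) -> torch.Tensor:
    # A is split as [[S, b], [b^T, d]]; invert S directly here
    # (the class above uses its LU-based tri_submatrix_inverse instead).
    S_inv = torch.inverse(A[:-1, :-1])
    b = A[:-1, -1]
    d = A[-1, -1]
    reci = 1.0 / (d - b @ S_inv @ b)              # inverse of the Schur complement
    inv_vec = -reci * (S_inv @ b)
    out = torch.empty_like(A)
    out[:-1, :-1] = S_inv + reci * torch.outer(S_inv @ b, S_inv @ b)
    out[-1, :-1] = inv_vec
    out[:-1, -1] = inv_vec
    out[-1, -1] = reci
    return out

n = 6
B = torch.randn(n, n)
A = B @ B.t() + n * torch.eye(n)                  # symmetric positive definite test matrix
assert torch.allclose(block_inverse(A), torch.inverse(A), atol=1e-5)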
Example #2
def vamp_score(data: torch.Tensor, data_lagged: torch.Tensor, method='VAMP2', epsilon: float = 1e-6, mode='trunc'):
    r"""Computes the VAMP score based on data and corresponding time-shifted data.

    Parameters
    ----------
    data : torch.Tensor
        (N, d)-dimensional torch tensor
    data_lagged : torch.Tensor
        (N, d)-dimensional torch tensor
    method : str, default='VAMP2'
        The scoring method. See :meth:`score <deeptime.decomposition.CovarianceKoopmanModel.score>` for details.
    epsilon : float, default=1e-6
        Cutoff parameter for small eigenvalues, alternatively regularization parameter.
    mode : str, default='trunc'
        Regularization mode for Hermitian inverse. See :meth:`sym_inverse`.

    Returns
    -------
    score : torch.Tensor
        The score. It contains a contribution of :math:`+1` for the constant singular function since the
        internally estimated Koopman operator is defined on a decorrelated basis set.
    """
    if method not in valid_score_methods:
        raise ValueError(f"Invalid method '{method}', supported are {valid_score_methods}")
    assert data.shape == data_lagged.shape

    if method == 'VAMP1':
        koopman = koopman_matrix(data, data_lagged, epsilon=epsilon, mode=mode)
        out = torch.norm(koopman, p='nuc')
    elif method == 'VAMP2':
        koopman = koopman_matrix(data, data_lagged, epsilon=epsilon, mode=mode)
        out = torch.pow(torch.norm(koopman, p='fro'), 2)
    elif method == 'VAMPE':
        c00, c0t, ctt = covariances(data, data_lagged, remove_mean=True)
        c00_sqrt_inv = sym_inverse(c00, epsilon=epsilon, return_sqrt=True, mode=mode)
        ctt_sqrt_inv = sym_inverse(ctt, epsilon=epsilon, return_sqrt=True, mode=mode)
        koopman = torch.chain_matmul(c00_sqrt_inv, c0t, ctt_sqrt_inv).t()

        u, s, v = torch.svd(koopman)
        mask = s > epsilon

        u = torch.mm(c00_sqrt_inv, u[:, mask])
        v = torch.mm(ctt_sqrt_inv, v[:, mask])
        s = s[mask]

        u_t = u.t()
        v_t = v.t()
        s = torch.diag(s)

        out = torch.trace(
            2. * torch.chain_matmul(s, u_t, c0t, v)
            - torch.chain_matmul(s, u_t, c00, u, s, v_t, ctt, v)
        )
    else:
        raise RuntimeError("This should have been caught earlier.")
    return 1 + out
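vamp_score depends on helpers defined elsewhere in the same module (covariances, koopman_matrix, sym_inverse, valid_score_methods). The following is a self-contained sketch of the VAMP2 branch only, with the covariance estimation and the inverse square root inlined; vamp2_sketch and sqrt_inv are illustrative names, not part of the library.

import torch

def vamp2_sketch(data: torch.Tensor, data_lagged: torch.Tensor, epsilon: float = 1e-6) -> torch.Tensor:
    # Inline covariance estimation with mean removal (stand-in for `covariances`).
    x = data - data.mean(dim=0, keepdim=True)
    y = data_lagged - data_lagged.mean(dim=0, keepdim=True)
    n = x.shape[0]
    c00 = x.t() @ x / (n - 1)
    c0t = x.t() @ y / (n - 1)
    ctt = y.t() @ y / (n - 1)

    def sqrt_inv(c):
        # Stand-in for `sym_inverse(..., return_sqrt=True)`: truncated eigendecomposition.
        vals, vecs = torch.linalg.eigh(c)
        mask = vals > epsilon
        return vecs[:, mask] @ torch.diag(vals[mask].rsqrt()) @ vecs[:, mask].t()

    whitened_koopman = sqrt_inv(c00) @ c0t @ sqrt_inv(ctt)
    return 1 + torch.norm(whitened_koopman, p='fro') ** 2

data = torch.randn(1000, 5)
print(vamp2_sketch(data[:-1], data[1:]))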
Example #3
 def _compute_scoring_matrices(self, plda_dim):
     if self.plda_dim != plda_dim:
         self.plda_dim = plda_dim
         iSt = torch.inverse(self.St)
         iS = torch.inverse(self.St - torch.chain_matmul(self.Sb, iSt, self.Sb))
         Q = iSt - iS
         P = torch.chain_matmul(iSt, self.Sb, iS)
         U, s = torch.svd(P)[:2]
         self.l = s[:plda_dim]
         self.uk = U[:, :plda_dim]
         self.qhat = torch.chain_matmul(self.uk.t(), Q, self.uk)
Example #4
    def step(self, hessian_matrix):
        hess = -hessian_matrix
        v = torch.randn([hess.size()[0], 1], dtype=torch.float32)
        Mi = torch.ones_like(hess) + self.eta * hess
        for i in range(int(1 / (self.delta**2))):
            v = torch.mm(Mi, v)
            v = v / torch.norm(v)
        kick_criterion = torch.chain_matmul(v.transpose(0, 1), hess, v)
        print(kick_criterion.item())

        if (kick_criterion <= -self.delta / 2):
            print('kick')
            kick = self.bern.sample() * 2 - 1
            for group in self.param_groups:
                for ctr, p in enumerate(group['params']):
                    if p.grad is None:
                        continue
                    additive = (kick * group['delta'] * v[ctr]).cuda()
                    p.add_(additive)
        else:
            for group in self.param_groups:
                for p in group['params']:
                    if p.grad is None:
                        continue
                    d_p = p.grad
                    x_h = p
                    for s in range(group['p']):
                        x_list = [x_h]
                        for t in range(int(group['B'] / group['p'])):
                            d_tmp = d_p + group['sigma'] * (x_list[t] - x_h)
                            x_list.append(x_list[t].add_(
                                d_tmp, alpha=-group['alpha']))
                        temp = torch.stack(x_list)
                        x_h = torch.mean(temp, dim=0)
                    p = x_h
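The loop above is a power iteration on a shifted matrix, used to estimate a dominant eigendirection before the curvature test. A minimal stand-alone sketch of that idea, assuming an identity shift I + eta*A (the snippet above uses torch.ones_like, i.e. an all-ones matrix, as the shift); dominant_eigvec is an illustrative name.

import torch

def dominant_eigvec(A: torch.Tensor, eta: float = 0.1, iters: int = 100) -> torch.Tensor:
    """Power iteration on the shifted matrix I + eta*A (illustrative sketch)."""
    n = A.shape[0]
    M = torch.eye(n) + eta * A        # identity shift
    v = torch.randn(n, 1)
    for _ in range(iters):
        v = M @ v
        v = v / torch.norm(v)
    return v

A = torch.randn(8, 8)
A = 0.5 * (A + A.t())                 # symmetric test matrix
v = dominant_eigvec(A)
rayleigh = (v.t() @ A @ v).item()     # Rayleigh quotient along the estimated direction
print(rayleigh, torch.linalg.eigvalsh(A).max().item())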
Example #5
def sym_inverse(mat,
                epsilon: float = 1e-6,
                return_sqrt=False,
                mode='regularize'):
    """ Utility function that returns the inverse of a matrix, with the
    option to return the square root of the inverse matrix.

    Parameters
    ----------
    mat : torch.Tensor with shape [m, m]
        Matrix to be inverted.
    epsilon : float
        Cutoff for eigenvalues.
    return_sqrt : bool, optional, default=False
        If True, the square root of the inverse matrix is returned instead.
    mode : str, default='regularize'
        Whether to truncate eigenvalues if they are too small or to regularize them by taking the absolute value
        and adding a small positive constant. :code:`trunc` leads to truncation, :code:`regularize` leads to epsilon
        being added to the eigenvalues after taking the absolute value.

    Returns
    -------
    x_inv : torch.Tensor with shape [m, m]
        Inverse of the original matrix.
    """
    eigval, eigvec = symeig_reg(mat, epsilon, mode)

    # Build the diagonal matrix with the filtered eigenvalues or square
    # root of the filtered eigenvalues according to the parameter
    if return_sqrt:
        diag = torch.diag(torch.sqrt(1. / eigval))
    else:
        diag = torch.diag(1. / eigval)

    return torch.chain_matmul(eigvec.t(), diag, eigvec)
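symeig_reg is defined elsewhere in the same module. Below is a self-contained sketch of the same idea using torch.linalg.eigh, showing both the 'trunc' and 'regularize' treatments of small eigenvalues; it is an illustrative stand-in, not the original helper.

import torch

def sym_inverse_sketch(mat, epsilon=1e-6, return_sqrt=False, mode='regularize'):
    eigval, eigvec = torch.linalg.eigh(mat)        # columns of eigvec are eigenvectors
    if mode == 'trunc':
        mask = eigval > epsilon                    # drop small/negative eigenvalues
        eigval, eigvec = eigval[mask], eigvec[:, mask]
    elif mode == 'regularize':
        eigval = eigval.abs() + epsilon            # absolute value plus a small constant
    diag = torch.diag(eigval.rsqrt() if return_sqrt else 1.0 / eigval)
    return eigvec @ diag @ eigvec.t()

m = 5
B = torch.randn(m, m)
spd = B @ B.t() + m * torch.eye(m)                 # well-conditioned SPD matrix
assert torch.allclose(sym_inverse_sketch(spd), torch.inverse(spd), atol=1e-4)
sqrt_inv = sym_inverse_sketch(spd, return_sqrt=True)
assert torch.allclose(sqrt_inv @ sqrt_inv, torch.inverse(spd), atol=1e-4)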
Example #6
def DP(C, mu, nu, dx, dy, K, u, v, epsilon):
    d_opt = dot2d(1 - torch.exp(-u), torch.mul(mu, dx)) + dot2d(
        1 - torch.exp(-torch.min(C - u, 1)[0].reshape(dy.shape)), torch.mul(nu, dy))
    constrain = dot2d(1 - torch.exp(-u), torch.mul(mu, dx)) + dot2d(1 - torch.exp(-v), torch.mul(nu, dy))
    regular = epsilon * torch.chain_matmul(dx, torch.exp(-C / epsilon) - K, dy)
    reg_d_opt = constrain + regular
    return d_opt, reg_d_opt.squeeze()
Example #7
 def _solve_v_and_rescale(self, weight_mat, u, target_sigma):
     # Tries to return a vector `v` s.t. `u = normalize(W @ v)`
     # (the invariant at top of this class) and `u @ W @ v = sigma`.
     # This uses pinverse in case W^T W is not invertible.
     v = torch.chain_matmul(weight_mat.t().mm(weight_mat).pinverse(),
                            weight_mat.t(), u.unsqueeze(1)).squeeze(1)
     return v.mul_(target_sigma / torch.dot(u, torch.mv(weight_mat, v)))
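A small check of what this helper guarantees, written with plain matmuls: starting from the top left singular vector u and target_sigma equal to the top singular value, the recovered v satisfies u^T W v = sigma and coincides with the matching right singular vector. Illustrative sketch only.

import torch

torch.manual_seed(0)
W = torch.randn(6, 4)
U, S, Vh = torch.linalg.svd(W)
u = U[:, 0]                       # left singular vector of the top singular value
target_sigma = S[0]

# Same computation as _solve_v_and_rescale, written with plain matmuls.
v = (torch.linalg.pinv(W.t() @ W) @ W.t() @ u.unsqueeze(1)).squeeze(1)
v = v * (target_sigma / torch.dot(u, W @ v))

assert torch.allclose(u @ W @ v, target_sigma, atol=1e-4)   # u^T W v == sigma
assert torch.allclose(v, Vh[0], atol=1e-4)                   # v recovers the matching right singular vector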
Example #8
def koopman_matrix(x: torch.Tensor, y: torch.Tensor, epsilon: float = 1e-6, mode: str = 'trunc',
                   c_xx: Optional[Tuple[torch.Tensor, torch.Tensor, torch.Tensor]] = None) -> torch.Tensor:
    r""" Computes the Koopman matrix

    .. math:: K = C_{00}^{-1/2}C_{0t}C_{tt}^{-1/2}

    based on data over which the covariance matrices :math:`C_{\cdot\cdot}` are computed.

    Parameters
    ----------
    x : torch.Tensor
        Instantaneous data.
    y : torch.Tensor
        Time-lagged data.
    epsilon : float, default=1e-6
        Cutoff parameter for small eigenvalues.
    mode : str, default='trunc'
        Regularization mode for Hermitian inverse. See :meth:`sym_inverse`.
    c_xx : tuple of torch.Tensor, optional, default=None
        Tuple containing c00, c0t, ctt if already computed.

    Returns
    -------
    K : torch.Tensor
        The Koopman matrix.
    """
    if c_xx is not None:
        c00, c0t, ctt = c_xx
    else:
        c00, c0t, ctt = covariances(x, y, remove_mean=True)
    c00_sqrt_inv = sym_inverse(c00, return_sqrt=True, epsilon=epsilon, mode=mode)
    ctt_sqrt_inv = sym_inverse(ctt, return_sqrt=True, epsilon=epsilon, mode=mode)
    return torch.chain_matmul(c00_sqrt_inv, c0t, ctt_sqrt_inv).t()
Example #9
    def forward(self):
        out = torch.tensor([]).cuda()
        item_adj = self.item_adj
        x = self.feat
        pre_out = None

        entropy_loss = 0.0
        indepence_loss = 0.0 

        for index in range(len(self.layers)):
            conv_layer = self.conv_layer_list[index]
            x = conv_layer(x, item_adj)

            if self.has_act:
                x = F.leaky_relu(conv_layer(x, item_adj))

            weight = self.weight_list[index]
            temp_out = torch.matmul(x, weight)

            temp_out = torch.softmax(temp_out, dim=1)
            entropy_loss += (-temp_out * torch.log(temp_out+1e-15)).sum(dim=-1).mean()

            item_adj = torch.chain_matmul(temp_out.t(), item_adj, temp_out)

            if pre_out is not None:
                temp_out = torch.matmul(pre_out, temp_out)
            out = torch.cat((out, temp_out), dim=1)
            pre_out = temp_out
            x = weight.t()

            indepence_loss += torch.norm(torch.matmul(weight.t(), weight)-torch.eye(self.layers[index]).cuda(), p=2)/(self.layers[index]*self.layers[index])

        return out, entropy_loss, indepence_loss
Example #10
    def OAM(self, x):
        '''
            x size : (batch_size, depth, H, W)
        '''
        batch_size, depth = x.size(0), x.size(1)
        H, W = x.size(2), x.size(3)
        M = torch.ones(batch_size, H * W) / (H * W)

        D = torch.zeros(batch_size, H * W, H * W)
        # fill D with D(i*W+j, i'*W+j') = |x[i, j] - x[i', j']|
        # D is symmetric, so we only need to compute the upper triangle
        for i in range(H):
            for j in range(W):
                for I in range(H):
                    for J in range(W):
                        if I < i or (I == i and J < j):
                            continue
                        D[:, i * W + j, I * W +
                          J] = (x[:, :, i, j] - x[:, :, I, J]).abs().sum(dim=1)
                        D[:, I * W + J, i * W + j] = D[:, i * W + j, I * W + J]

        # normalize D by out-bound edges (by row)
        D = nn.functional.normalize(D, 1, -1)
        # chain multiplication of D
        for i in range(batch_size):
            D[i] = torch.chain_matmul(*([D[i]] * 10))
            M[i] = torch.matmul(D[i], M[i])

        M = M.view(batch_size, H, W)

        return M
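Since the same matrix is multiplied by itself ten times, chain_matmul(*([D[i]] * 10)) is equivalent to torch.matrix_power(D[i], 10), which avoids building the repeated argument list. A quick check of that equivalence (illustrative):

import torch

D = torch.rand(16, 16)
D = torch.nn.functional.normalize(D, p=1, dim=-1)   # row-normalized, as in OAM
walked = torch.chain_matmul(*([D] * 10))
assert torch.allclose(walked, torch.matrix_power(D, 10), atol=1e-5)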
Example #11
def frobenius_norm(U, VT):

    m, r = U.shape
    r, n = VT.shape
    if m * n * r < r * r * (m + n):
        return torch.norm(torch.matmul(U, VT))
    return torch.sqrt(torch.trace(torch.chain_matmul(VT, VT.T, U.T, U)))
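Both branches compute the same value: by cyclicity of the trace, ||U V^T||_F^2 = tr((V^T)(V^T)^T U^T U), so the second branch only ever forms r x r intermediates. A quick numerical check (illustrative):

import torch

m, n, r = 50, 40, 3
U = torch.randn(m, r)
VT = torch.randn(r, n)
direct = torch.norm(U @ VT)                                       # forms the full m x n product
via_trace = torch.sqrt(torch.trace((VT @ VT.T) @ (U.T @ U)))      # only r x r intermediates
assert torch.allclose(direct, via_trace, atol=1e-4)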
Example #12
    def eval_nystrom_eigenfunctions(self,t=None,m=None):
        # to adapt for the GPU


        n1,n2 = (self.n1,self.n2)
        ntot = n1+n2
        m1,m2 = (self.nxlandmarks,self.nylandmarks)
        mtot = m1+m2

        mtot=self.nxlandmarks + self.nylandmarks
        ntot= self.n1 + self.n2
        
        t = 60   if t is None else t # to avoid an overly long computation # t= self.n1 + self.n2
        m = mtot if m is None else m
        
        kmn   = self.compute_kmn()
        Pbiny = self.compute_centering_matrix(sample='xy',quantization=True)
        Pbi   = self.compute_centering_matrix(sample='xy',quantization=False)
        
        Vny  = self.evny[:m]
        V    = self.ev[:t] 
        spny = self.spny[:m]
        sp   = self.sp[:t]

        # print(((spny * mtot)**(-1/2))[:3],'\n',((sp*ntot)**-(1/2))[:3])
        return( ((((spny * mtot)**(-1/2))*((sp*ntot)**-(1/2)*torch.chain_matmul(Vny,Pbiny,kmn,Pbi, V.T)).T).T).diag())
Example #13
    def sample_energy(ceta, mean, cov, zi, n_gmm, bs):
        # print('calculate sample energy')
        e = torch.tensor(0.0)
        cov_eps = torch.eye(mean.shape[1]) * (1e-3)  # original constant: 1e-12
        #         cov_eps = cov_eps.to(device)
        for k in range(n_gmm):
            miu_k = mean[k].unsqueeze(1)
            d_k = zi - miu_k

            inv_cov = torch.inverse(cov[k] + cov_eps)

            # inv_cov = inv_cov.detach().numpy()
            # with open('test_log.txt', 'a') as f:
            #     if inv_cov.shape[0] != np.linalg.matrix_rank(inv_cov):
            #         f.write('matrix does not have full rank. \n')
            #     else:
            #         f.write('matrix have full rank. \n')
            #     f.close()
            # inv_cov = torch.tensor(inv_cov)
            # inv_cov.to('cuda')

            e_k = torch.exp(-0.5 *
                            torch.chain_matmul(torch.t(d_k), inv_cov, d_k))
            e_k = e_k / torch.sqrt(torch.abs(torch.det(2 * math.pi * cov[k])))
            e_k = e_k * ceta[k]
            e += e_k.squeeze()

        return -torch.log(e)
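Each e_k above is the mixture weight times a Gaussian density evaluated at zi, so a single term can be cross-checked against torch.distributions.MultivariateNormal. A small sketch for one component, assuming zi has shape (d, 1) as in the loop; all names are illustrative.

import math
import torch
from torch.distributions import MultivariateNormal

torch.manual_seed(0)
d = 3
mu = torch.randn(d)
A = torch.randn(d, d)
cov = A @ A.t() + torch.eye(d)            # SPD covariance
z = torch.randn(d, 1)                     # single sample, shaped (d, 1) as in the loop above
phi = torch.tensor(0.7)                   # mixture weight for this component (ceta[k] above)

# Loop-style term, as in sample_energy
diff = z - mu.unsqueeze(1)
e_k = torch.exp(-0.5 * diff.t() @ torch.inverse(cov) @ diff)
e_k = e_k / torch.sqrt(torch.abs(torch.det(2 * math.pi * cov)))
e_k = phi * e_k.squeeze()

# Reference density from torch.distributions
ref = phi * MultivariateNormal(mu, cov).log_prob(z.squeeze()).exp()
assert torch.allclose(e_k, ref, atol=1e-5)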
Example #14
File: kfac.py  Project: kngwyu/Rainy
 def __fisher_grad(
     self, weight: Tensor, bias: Optional[Tensor], layer: Layer, state: dict
 ) -> Tuple[Tensor, Optional[Tensor]]:
     """Computes F^{-1}∇h"""
     grad = weight.grad.data
     if layer is Layer.CONV2D:
         grad = grad.view(grad.size(0), -1)
     if bias is not None:
         grad = torch.cat([grad, bias.grad.data.view(-1, 1)], dim=1)
     v1 = torch.chain_matmul(state["vg"].t(), grad, state["vx"])
     v2 = v1.div_(state["eg*ex"].add(self.gamma))
     grad = torch.chain_matmul(state["vg"], v2, state["vx"].t())
      gb = None
      gw = grad
      if bias is not None:
          gb = grad[:, -1].reshape(bias.shape)
          gw = grad[:, :-1]
      return gw.reshape(weight.shape), gb
Example #15
    def forward(self, u_features, i_features):
        if self.apply_drop:
            u_features = self.dropout(u_features)
            i_features = self.dropout(i_features)
        if self.accum == 'stack':
            u_features = u_features.reshape(self.num_relations, -1,
                                            self.feature_dim)
            i_features = i_features.reshape(self.num_relations, -1,
                                            self.feature_dim)
            num_users = u_features.shape[1]
            num_items = i_features.shape[1]
        else:
            num_users = u_features.shape[0]
            num_items = i_features.shape[0]

        for relation in range(self.num_relations):
            q_matrix = torch.sum(
                self.coefs[relation].unsqueeze(1) * self.basis_matrix, 0)
            q_matrix = q_matrix.reshape(self.feature_dim, self.feature_dim)
            if self.accum == 'stack':
                if relation == 0:
                    out = torch.chain_matmul(
                        u_features[relation], q_matrix,
                        i_features[relation].t()).unsqueeze(-1)
                else:
                    out = torch.cat(
                        (out,
                         torch.chain_matmul(
                             u_features[relation], q_matrix,
                             i_features[relation].t()).unsqueeze(-1)),
                        dim=2)
            else:
                if relation == 0:
                    out = torch.chain_matmul(u_features, q_matrix,
                                             i_features.t()).unsqueeze(-1)
                else:
                    out = torch.cat(
                        (out,
                         torch.chain_matmul(u_features, q_matrix,
                                            i_features.t()).unsqueeze(-1)),
                        dim=2)

        out = out.reshape(num_users * num_items, -1)

        return out
Example #16
    def objective(H,W):
        M = P - torch.chain_matmul(x, H, torch.transpose(W, 0, 1), torch.transpose(y, 0, 1))
        
        M[Ineg] *= alpha_rac
        M[~train_mask] = 0.
        
        L = torch.sum(M**2) + gamma/2 * (torch.sum(H**2) + torch.sum(W**2))

        return(L)
Example #17
    def reference(ref_data):
        t0 = torch.tensor(d0, requires_grad=True)
        t1 = torch.tensor(d1, requires_grad=True)
        t2 = torch.tensor(d2, requires_grad=True)
        o = torch.chain_matmul(t0, t1, t2)
        d__o = ref_data.getOutputTensorGrad(0)
        o.backward(torch.tensor(d__o))

        return [o, d__o, t0.grad, t1.grad, t2.grad]
Example #18
    def constraint(z):
        z = torch.Tensor(z).to(device)
        H = z[:Fd*k].resize(Fd,k)
        W = z[-Ft*k:].resize(Ft,k)

        S = torch.chain_matmul(x, H, torch.transpose(W, 0, 1), torch.transpose(y, 0, 1))
        S = S.reshape((-1,)).cpu().detach().numpy()
        
        return(S)
Example #19
def _precond_grad_dense_dense(Ql, Qr, Grad):
    # type: (Tensor, Tensor, Tensor) -> Tensor
    """
    return preconditioned gradient using Kronecker product preconditioner
    Ql: (left side) Cholesky factor of preconditioner
    Qr: (right side) Cholesky factor of preconditioner
    Grad: (matrix) gradient
    """
    return torch.chain_matmul(Ql.t(), Ql, Grad, Qr.t(), Qr)
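With P_l = Ql^T Ql and P_r = Qr^T Qr this returns P_l · Grad · P_r, i.e. the gradient preconditioned by the Kronecker factorization P_l ⊗ P_r acting on the (row-major) flattened gradient. A short check of that reading (illustrative):

import torch

M, N = 4, 3
Ql, Qr = torch.randn(M, M), torch.randn(N, N)
Grad = torch.randn(M, N)

Pl, Pr = Ql.t() @ Ql, Qr.t() @ Qr
precond = torch.chain_matmul(Ql.t(), Ql, Grad, Qr.t(), Qr)        # as in the function above

# Same result through the Kronecker-product / vec identity (row-major flatten):
flat = torch.kron(Pl, Pr) @ Grad.flatten()
assert torch.allclose(precond, Pl @ Grad @ Pr, atol=1e-5)
assert torch.allclose(precond.flatten(), flat, atol=1e-4)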
Example #20
def frobgrad(matrices):

    assert type(matrices) == list
    assert 1 <= len(matrices) <= 3
    output = [None for _ in matrices]
    if len(matrices) == 1:
        output[0] = matrices[0].clone()
    else:
        if len(matrices) == 2:
            U, VT = matrices
            UM, MVT = matrices
        else:
            U, M, VT = matrices
            UM, MVT = torch.matmul(U, M), torch.matmul(M, VT)
            output[1] = torch.chain_matmul(U.T, U, M, VT, VT.T)
        output[0] = torch.chain_matmul(U, MVT, MVT.T)
        output[-1] = torch.chain_matmul(UM.T, UM, VT)
    return output
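The outputs are the gradients of 0.5·||U M V^T||_F^2 with respect to U, M and V^T (VT is already the transposed factor). A quick autograd cross-check, assuming the frobgrad above is in scope; illustrative only.

import torch

torch.manual_seed(0)
U = torch.randn(5, 3, requires_grad=True)
M = torch.randn(3, 3, requires_grad=True)
VT = torch.randn(3, 4, requires_grad=True)

loss = 0.5 * torch.norm(U @ M @ VT) ** 2
loss.backward()

gU, gM, gVT = frobgrad([U.detach(), M.detach(), VT.detach()])
assert torch.allclose(gU, U.grad, atol=1e-5)
assert torch.allclose(gM, M.grad, atol=1e-5)
assert torch.allclose(gVT, VT.grad, atol=1e-5)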
Example #21
    def forward(self, x, tags_numbers, temperature=0.5):
        x = self.dropout(x)
        x = self.transform_nn(x)
        x = self.batch_norm(x)
        x = self.activation(x)

        x = self.dropout(x)

        x_list = matrix_to_list_by_length(x, tags_numbers)

        j = 0
        logits_list = list()
        for i, l in enumerate(self.proj_nn):
            if tags_numbers[i] > 0:
                logits_list.append(l(x_list[j]))
                j += 1

        dictionary_logits = torch.cat(logits_list, dim=0)
        dictionary_probs = softmax(dictionary_logits, dim=1)
        reverse_dictionary_probs = torch.square(1 - dictionary_probs)
        dictionary_gumbel_weights = custom_gumbel_softmax(dictionary_logits, temperature, hard=False)

        _, pos_inds = torch.topk(dictionary_gumbel_weights, self.k)  # pos_inds dim: total_number_tags x k
        pos_inds_one_hot = one_hot(pos_inds, num_classes=self.c).sum(dim=1)
        neg_inds = (pos_inds_one_hot == 0).nonzero()[:, 1].reshape(-1, self.c - self.k)
        assert neg_inds.size(0) == dictionary_gumbel_weights.size(0)

        neg_inds_list = matrix_to_list_by_length(neg_inds, tags_numbers)

        neg_samples_list = list()
        j = 0
        for i, dic_embed in enumerate(self.dictionary_embeddings):
            if tags_numbers[i] > 0:
                for instance_index, neg_inds_of_this_index in enumerate(neg_inds_list[j]):
                    neg_samples_list.append(torch.index_select(dic_embed, 0, neg_inds_of_this_index))
                j += 1
        neg_samples_embeddings = torch.stack(neg_samples_list, dim=0)  # neg_samples_dim : total_number_tags x 14 x 768

        reverse_dictionary_probs_list = list()
        for i, neg_ind in enumerate(neg_inds):
            reverse_dictionary_probs_list.append(
                softmax(torch.index_select(reverse_dictionary_probs[i], 0, neg_ind), dim=0))
        reverse_dictionary_probs = torch.stack(reverse_dictionary_probs_list, dim=0)

        dictionary_weights_list = matrix_to_list_by_length(dictionary_gumbel_weights, tags_numbers)

        j = 0
        reconstructed_embeddings_list = list()
        for i, dic_embed in enumerate(self.dictionary_embeddings):
            if tags_numbers[i] > 0:
                reconstructed_embeddings_list.append(torch.chain_matmul(dictionary_weights_list[j], dic_embed))
                j += 1

        reconstructed_embeddings = torch.cat(reconstructed_embeddings_list, dim=0)

        return dictionary_gumbel_weights, reconstructed_embeddings, dictionary_logits, neg_samples_embeddings, reverse_dictionary_probs
Example #22
    def total_loss(x,H,W,y,P):
        M = P - torch.chain_matmul(x, H, torch.transpose(W, 0, 1), torch.transpose(y, 0, 1))

        M[~train_mask] = 0.

        M[neg_train] *= alpha_rac

        L = torch.mean(M**2) + gamma/2 * (torch.sum(H**2) + torch.sum(W**2))

        return(L)
Example #23
 def forward(self, inputs, adjacency_matrix):
     '''
     inputs: shape = [num_entity, embedding_dim]
     '''
     outputs = torch.chain_matmul(adjacency_matrix, inputs, self.weights)
     # support = torch.mm(inputs, self.weights)
     # outputs = torch.spmm(inputs, support)
     if self.bias is not None:
         outputs += self.bias
     return outputs
Example #24
 def f(a, b):
     x1 = a * b
     x2 = x1 * b
     x3 = x2 * a
     x4 = x3 / b
     x5 = x4 / a
     x6 = x5 / b
     x7 = x6 * a
     x8 = x7 * b
     return x8 * torch.chain_matmul(x8, x8)
Example #25
def _transform_around_point(point: Tuple[float, float],
                            transform_matrix: torch.Tensor) -> torch.Tensor:
    return cast(
        torch.Tensor,
        torch.chain_matmul(
            _create_motif_translation_matrix(point, inverse=False),
            transform_matrix,
            _create_motif_translation_matrix(point, inverse=True),
        ),
    )
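_create_motif_translation_matrix is defined elsewhere in the same module; the pattern is the usual conjugation "translate the pivot to the origin, apply the transform, translate back" with 3x3 homogeneous matrices. A self-contained sketch with an inline translation helper (translation and rotation are illustrative stand-ins), checking that the pivot point is left fixed:

import math
import torch

def translation(point, inverse=False):
    # 3x3 homogeneous translation matrix (stand-in for _create_motif_translation_matrix)
    sign = -1.0 if inverse else 1.0
    t = torch.eye(3)
    t[0, 2], t[1, 2] = sign * point[0], sign * point[1]
    return t

def rotation(angle):
    c, s = math.cos(angle), math.sin(angle)
    return torch.tensor([[c, -s, 0.0], [s, c, 0.0], [0.0, 0.0, 1.0]])

point = (2.0, 3.0)
T = translation(point) @ rotation(math.pi / 2) @ translation(point, inverse=True)

# The pivot point is a fixed point of the combined transform.
p_hom = torch.tensor([point[0], point[1], 1.0])
assert torch.allclose(T @ p_hom, p_hom, atol=1e-6)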
Example #26
    def _get_credit(self, user_idx, X, tilde_X):
        # Please refer to Eq.8.
        Minv, tilde_Minv = self.Minv[user_idx], self.tilde_Minv[user_idx]

        tilde_Minv_tilde_X = torch.mm(tilde_Minv, tilde_X.T)
        result_a = torch.chain_matmul(X, Minv, tilde_Minv_tilde_X)
        result_b = 1 + (tilde_X.T * tilde_Minv_tilde_X).sum(dim=0)
        norm_M = result_a.norm(dim=0)

        return norm_M * norm_M / result_b
Example #27
def _precond_grad_dense_scale(Ql, qr, Grad):
    # type: (Tensor, Tensor, Tensor) -> Tensor
    """
    return preconditioned gradient using (dense, scaling) Kronecker product preconditioner
    Suppose Grad has shape (M, N)
    Ql: shape (M, M), (left side) Cholesky factor of preconditioner
    qr: shape (1, N), defines a diagonal matrix for output feature scaling
    Grad: (matrix) gradient
    """
    return torch.chain_matmul(Ql.t(), Ql, Grad*(qr*qr))
Example #28
 def blas_lapack_ops(self):
     m = torch.randn(3, 3)
     a = torch.randn(10, 3, 4)
     b = torch.randn(10, 4, 3)
     v = torch.randn(3)
     return (
         torch.addbmm(m, a, b),
         torch.addmm(torch.randn(2, 3), torch.randn(2, 3),
                     torch.randn(3, 3)),
         torch.addmv(torch.randn(2), torch.randn(2, 3), torch.randn(3)),
         torch.addr(torch.zeros(3, 3), v, v),
         torch.baddbmm(m, a, b),
         torch.bmm(a, b),
         torch.chain_matmul(torch.randn(3, 3), torch.randn(3, 3),
                            torch.randn(3, 3)),
         # torch.cholesky(a), # deprecated
         torch.cholesky_inverse(torch.randn(3, 3)),
         torch.cholesky_solve(torch.randn(3, 3), torch.randn(3, 3)),
         torch.dot(v, v),
         torch.eig(m),
         torch.geqrf(a),
         torch.ger(v, v),
         torch.inner(m, m),
         torch.inverse(m),
         torch.det(m),
         torch.logdet(m),
         torch.slogdet(m),
         torch.lstsq(m, m),
         torch.lu(m),
         torch.lu_solve(m, *torch.lu(m)),
         torch.lu_unpack(*torch.lu(m)),
         torch.matmul(m, m),
         torch.matrix_power(m, 2),
         # torch.matrix_rank(m),
         torch.matrix_exp(m),
         torch.mm(m, m),
         torch.mv(m, v),
         # torch.orgqr(a, m),
         # torch.ormqr(a, m, v),
         torch.outer(v, v),
         torch.pinverse(m),
         # torch.qr(a),
         torch.solve(m, m),
         torch.svd(a),
         # torch.svd_lowrank(a),
         # torch.pca_lowrank(a),
         # torch.symeig(a), # deprecated
         # torch.lobpcg(a, b), # not supported
         torch.trapz(m, m),
         torch.trapezoid(m, m),
         torch.cumulative_trapezoid(m, m),
         # torch.triangular_solve(m, m),
         torch.vdot(v, v),
     )
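torch.chain_matmul, exercised here and throughout these examples, has been deprecated in favor of torch.linalg.multi_dot, which likewise chooses the cheapest multiplication order for a chain of 2-D tensors. The equivalent call (illustrative):

import torch

a, b, c = torch.randn(3, 3), torch.randn(3, 3), torch.randn(3, 3)
assert torch.allclose(torch.chain_matmul(a, b, c),
                      torch.linalg.multi_dot([a, b, c]),
                      atol=1e-5)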
Example #29
def warp(pos1, depth1, intrinsics1, pose1, bbox1, depth2, intrinsics2, pose2,
         bbox2):
    device = pos1.device

    # Sample (interpolate) depths from the (256, 256) depth map at the upsampled (32, 32) pos1 (positions)
    # Z1: the sampled (32, 32) depths
    # pos1: the valid coordinates among the (32, 32) positions (valid = depth > 0 and inside the image)
    # ids: indices of the valid pos1 entries
    Z1, pos1, ids = interpolate_depth(pos1, depth1)

    # COLMAP convention: add the bbox1 offset (top-left corner of the crop in the original image) to pos1, plus 0.5,
    # presumably to land on pixel centers
    u1 = pos1[1, :] + bbox1[1] + .5  # pixel x coordinate
    v1 = pos1[0, :] + bbox1[0] + .5  # pixel y coordinate

    X1 = (u1 - intrinsics1[0, 2]) * (Z1 / intrinsics1[0, 0])
    Y1 = (v1 - intrinsics1[1, 2]) * (Z1 / intrinsics1[1, 1])

    XYZ1_hom = torch.cat([
        X1.view(1, -1),
        Y1.view(1, -1),
        Z1.view(1, -1),
        torch.ones(1, Z1.size(0), device=device)
    ],
                         dim=0)  # homogeneous coordinates in camera 1
    XYZ2_hom = torch.chain_matmul(pose2, torch.inverse(pose1),
                                  XYZ1_hom)  # homogeneous coordinates in camera 2
    XYZ2 = XYZ2_hom[:-1, :] / XYZ2_hom[-1, :].view(1, -1)  # normalized coordinates in camera 2

    uv2_hom = torch.matmul(intrinsics2, XYZ2)  # homogeneous pixel coordinates in image 2
    uv2 = uv2_hom[:-1, :] / uv2_hom[-1, :].view(1, -1)  # pixel coordinates in image 2
    # apply the bbox2 crop offset so uv2 refers to coordinates in the cropped (256, 256) image 2
    u2 = uv2[0, :] - bbox2[1] - .5
    v2 = uv2[1, :] - bbox2[0] - .5
    uv2 = torch.cat([u2.view(1, -1), v2.view(1, -1)], dim=0)

    # uv_to_pos swaps the array's x and y: uv is (x, y) while pos is (y, x)
    annotated_depth, pos2, new_ids = interpolate_depth(uv_to_pos(uv2), depth2)
    # filter once more against image 2's depth map: keep points with depth > 0 that stay inside the image
    ids = ids[new_ids]
    pos1 = pos1[:, new_ids]
    estimated_depth = XYZ2[2, new_ids]
    # keep only points whose estimated and interpolated depths agree within 0.05
    inlier_mask = torch.abs(estimated_depth - annotated_depth) < 0.05

    ids = ids[inlier_mask]
    if ids.size(0) == 0:
        raise EmptyTensorError

    pos2 = pos2[:, inlier_mask]
    pos1 = pos1[:, inlier_mask]

    return pos1, pos2, ids
Example #30
    def eval_nystrom_discriminant_axis(self,nystrom=1,t=None,m=None):
        # to adapt for the GPU
        # I haven't thought about the test_data version at all; I only made the matrix sizes match, so there may be logic errors

        n1,n2 = (self.n1,self.n2)
        ntot = n1+n2
        m1,m2 = (self.nxlandmarks,self.nylandmarks)
        mtot = m1+m2

        mtot=self.nxlandmarks + self.nylandmarks
        ntot= self.n1 + self.n2
        
        t = 60   if t is None else t # to avoid an overly long computation # t= self.n1 + self.n2
        m = mtot if m is None else m
        
        kmn   = self.compute_kmn(test_data=False)
        kmn_test   = self.compute_kmn(test_data=True)
        kny   = self.compute_gram(landmarks=True)
        k     = self.compute_gram(landmarks=False,test_data=False)
        Pbiny = self.compute_centering_matrix(sample='xy',quantization=True)
        Pbi   = self.compute_centering_matrix(sample='xy',quantization=False,test_data=False)
        
        Vny  = self.evny[:m]
        V    = self.ev[:t] 
        spny = self.spny[:m]
        sp   = self.sp[:t]

        mny1   = -1/m1 * torch.ones(m1, dtype=torch.float64) #, device=device) 
        mny2   = 1/m2 * torch.ones(m2, dtype=torch.float64) # , device=device)
        m_mtot = torch.cat((mny1, mny2), dim=0) # .to(device)
        

        mn1    = -1/n1 * torch.ones(n1, dtype=torch.float64) # , device=device)
        mn2    = 1/n2 * torch.ones(n2, dtype=torch.float64) # , device=device) 
        m_ntot = torch.cat((mn1, mn2), dim=0) #.to(device)
        
        # mn1_test    = -1/n1_test * torch.ones(n1_test, dtype=torch.float64) # , device=device)
        # mn2_test    = 1/n2_test * torch.ones(n2_test, dtype=torch.float64) # , device=device) 
        # m_ntot_test = torch.cat((mn1_test, mn2_test), dim=0) #.to(device)
        
        vpkm    = mv(torch.chain_matmul(V,Pbi,k),m_ntot)
        vpkm_ny = mv(torch.chain_matmul(Vny,Pbiny,kmn_test),m_ntot_test) if nystrom==1 else \
                  mv(torch.chain_matmul(Vny,Pbiny,kny),m_mtot)

        norm_h   = (ntot**-1 * sp**-2   * mv(torch.chain_matmul(V,Pbi,k),m_ntot)**2).sum()**(1/2)
        norm_hny = (mtot**-1 * spny**-2 * mv(torch.chain_matmul(Vny,Pbiny,kmn_test),m_ntot_test)**2).sum()**(1/2) if nystrom==1 else \
                   (mtot**-1 * spny**-2 * mv(torch.chain_matmul(Vny,Pbiny,kny),m_mtot)**2).sum()**(1/2)

        # print(norm_h,norm_hny)

        A = torch.zeros(m,t,dtype=torch.float64).addr(1/mtot*spny,1/ntot*sp) # A = outer(1/mtot*self.spny,1/ntot*self.sp)
        B = torch.zeros(m,t,dtype=torch.float64).addr(vpkm_ny,vpkm) # B = torch.outer(vpkm_ny,vpkm)
        C = torch.chain_matmul(Vny,Pbiny,kmn,Pbi,V.T)

        return(norm_h**-1*norm_hny**-1*(A*B*C).sum().item()) # <h_ny^* , h^*>/(||h_ny^*|| ||h^*||)