Example #1
    def forward(self, inputs: Tensor, supports: List[Tensor]):
        """
        :param inputs: tensor, [B, N, input_dim]
        :param supports: list of sparse tensors, each of shape [N, N]
        :return: tensor, [B, N, output_dim]
        """
        b, n, input_dim = inputs.shape
        x = inputs
        x0 = x.permute([1, 2, 0]).reshape(n, -1)  # (num_nodes, input_dim * batch_size)
        x = x0.unsqueeze(dim=0)  # (1, num_nodes, input_dim * batch_size)

        if self._max_diffusion_step > 0:
            for support in supports:
                x1 = sparse.mm(support, x0)
                x = self._concat(x, x1)
                for k in range(2, self._max_diffusion_step + 1):
                    # Chebyshev recurrence: x_k = 2 * support @ x_{k-1} - x_{k-2}
                    x2 = 2 * sparse.mm(support, x1) - x0
                    x = self._concat(x, x2)
                    x1, x0 = x2, x1

        x = x.reshape(-1, n, input_dim, b).transpose(0, 3)  # (batch_size, num_nodes, input_dim, num_matrices)
        x = x.reshape(b, n, -1)  # (batch_size, num_nodes, input_dim * num_matrices)

        return self.out(x)  # (batch_size, num_nodes, output_dim)
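The snippet above assumes a `_concat` helper that stacks each diffusion step along a new leading dimension; in DCRNN-style implementations it is roughly the two-liner below (shown here as an assumption, since the surrounding class is not part of the example):

    @staticmethod
    def _concat(x, x_):
        # Append x_ as one more diffusion "matrix" along dim 0.
        x_ = x_.unsqueeze(0)
        return torch.cat([x, x_], dim=0)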
Example #2
    def forward(self, x, adj):
        if x.is_sparse:
            x = sparse.mm(x, self.weight)
        else:
            x = F.dropout(x, p=self.dropout, training=self.training)
            x = torch.mm(x, self.weight)
        x = sparse.mm(adj, x)
        return x
Example #3
    def forward(self):

        feature_u_drop = sparse_drop(self.feature_u,
                                     self.drop_out) / (1.0 - self.drop_out)
        feature_v_drop = sparse_drop(self.feature_v,
                                     self.drop_out) / (1.0 - self.drop_out)

        hidden_feature_u = []
        hidden_feature_v = []

        W_list = torch.split(self.W, self.rate_num)
        W_flat = []
        for i in range(self.rate_num):
            Wr = W_list[0][i]
            M_u = self.all_M_u[i]
            M_v = self.all_M_v[i]
            hidden_u = sp.mm(feature_v_drop, Wr)
            hidden_u = self.reLU(sp.mm(M_u, hidden_u))

            ### need to further process M, normalization
            hidden_v = sp.mm(feature_u_drop, Wr)
            hidden_v = self.reLU(sp.mm(M_v, hidden_v))

            hidden_feature_u.append(hidden_u)
            hidden_feature_v.append(hidden_v)

            W_flat.append(Wr)

        hidden_feature_u = torch.cat(hidden_feature_u, dim=1)
        hidden_feature_v = torch.cat(hidden_feature_v, dim=1)
        W_flat = torch.cat(W_flat, dim=1)

        cat_u = torch.cat((hidden_feature_u, torch.mm(self.feature_u, W_flat)),
                          dim=1)
        cat_v = torch.cat((hidden_feature_v, torch.mm(self.feature_v, W_flat)),
                          dim=1)

        if self.use_side:
            side_hidden_feature_u = self.linear_layer_side_u(
                self.side_feature_u)
            side_hidden_feature_v = self.linear_layer_side_v(
                self.side_feature_v)

            cat_u = torch.cat((cat_u, side_hidden_feature_u), dim=1)
            cat_v = torch.cat((cat_v, side_hidden_feature_v), dim=1)

        embed_u = self.linear_cat_u(cat_u)
        embed_v = self.linear_cat_v(cat_v)

        score = []
        Q_list = torch.split(self.Q, self.rate_num)
        for i in range(self.rate_num):
            Qr = Q_list[0][i]

            tem = torch.mm(torch.mm(embed_u, Qr), torch.t(embed_v))

            score.append(tem)
        return torch.stack(score)
Example #4
    def forward(ctx, bias, weight: sparse.FloatTensor, dense_weight_placeholder, inp):
        if bias is None:
            out = sparse.mm(weight, inp)
        else:
            out = sparse.addmm(bias, weight, inp)
        ctx.save_for_backward(bias, weight, inp)
        return out
Example #5
    def forward(self, x, supports):
        """
        GGCM
        :param inputs: [pathch_size,batch_size,N,input_dim]
        :param supports: list of tensors, each tensor is with shape [N, N]
        :return: [batch_size,N,output_dim]
        """
        inputs = x
        p, b, n, input_dim = x.shape
        x = x.permute([2, 1, 0, 3]).reshape(
            n, -1)  # N, batch, patch_szie, input_dim  -->  N, -1
        # x = x0.unsqueeze(0)
        #
        # for support in supports:
        #     x1 = support.mm(x0)
        #     x = self._concat(x, x1)
        #     for k in range(2, self.max_diffusion_step+1):
        #         x2 = 2 * support.mm(x1) - x0
        #         x = self._concat(x, x2)
        #         x1, x0 = x2, x1
        #
        # x = x.view(-1, n, b, p * input_dim).permute([2,1,0,])

        x = sparse.mm(supports, x)
        x = x.reshape(n, b, p * input_dim).transpose(0, 1)  # (batch, N, p * input_dim)
        linear_out = self.linear(x)
        x, gate = linear_out.chunk(2, 2)

        inputs = inputs.permute([1, 2, 0, 3]).reshape(b, n, -1)  # (batch, N, p * input_dim)
        inputs = self.linear2(inputs)
        return (x + inputs) * self.activate(gate)  # batch,N,output_dim
Example #6
    def forward(self, edge_nodes, edge_feats):
        """
        :param edge_nodes: Matrix indicating the nodes which each edge in the
        batch connects. Shape [B, N].
        :param edge_feats: Features of *all* edges in the graph. Shape [E, D].
        :return: Hidden representation of shape [B, K].
        """
        # Get edges incident to the left and right nodes of each edge in the
        # batch. Result has shape [B, E].
        batch_edge_idcs = sp.mm(self.inc_matrix.transpose(1, 0),
                                edge_nodes.transpose(1, 0)).transpose(1, 0)
        # Normalise matrix row-wise such that edge features are averaged, not
        # summed.
        row_sum = torch.sum(batch_edge_idcs, dim=1)
        inv = 1.0 / row_sum
        inv[torch.isinf(inv)] = 0.0
        batch_edge_idcs = batch_edge_idcs * inv.view(-1, 1)

        # Compute hidden representations from edge_features
        h_edges = edge_feats
        for idx in range(len(self.lin_layers)):
            h_edges = self.lin_layers[idx](h_edges)
            if idx < len(self.lin_layers) - 1:
                h_edges = F.relu(h_edges)
                h_edges = self.bns[idx](h_edges)

        # Obtain features of each of these edges
        h = torch.spmm(batch_edge_idcs, h_edges)  # [B, K]

        return h
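The row-normalisation idiom above (sum the rows, invert, zero out the infinities) turns a 0/1 selection matrix into an averaging matrix. A minimal self-contained sketch with illustrative values:

    import torch

    # Row b marks which of the E edge features feed batch item b.
    sel = torch.tensor([[1., 1., 0.],
                        [0., 0., 0.]])   # second row selects nothing

    row_sum = torch.sum(sel, dim=1)
    inv = 1.0 / row_sum
    inv[torch.isinf(inv)] = 0.0          # empty rows average to zero, not NaN
    sel = sel * inv.view(-1, 1)          # each row now sums to 1 (or 0)

    feats = torch.tensor([[2.], [4.], [6.]])
    print(torch.mm(sel, feats))          # tensor([[3.], [0.]]) -- mean, not sum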
Example #7
    def forward(self, norm_adj):
        """ Perform GNN function on users and item embeddings
        Args:
            norm_adj (torch sparse tensor): the normalized adjacency matrix of the user-item interaction graph
        Returns:
            u_g_embeddings (tensor): processed user embeddings
            i_g_embeddings (tensor): processed item embeddings
        """
        ego_embeddings = torch.cat(
            (self.user_embedding.weight, self.item_embedding.weight), dim=0
        )
        ego_embeddings = ego_embeddings.to(torch.float32)
        all_embeddings = [ego_embeddings]

        norm_adj = norm_adj.to(self.device)
        norm_adj = norm_adj.to(torch.float32)
        for i in range(self.n_layers):
            side_embeddings = sparse.mm(norm_adj, ego_embeddings)
            sum_embeddings = F.leaky_relu(self.GC_weights[i](side_embeddings))
            bi_embeddings = torch.mul(ego_embeddings, side_embeddings)
            bi_embeddings = F.leaky_relu(self.Bi_weights[i](bi_embeddings))
            ego_embeddings = sum_embeddings + bi_embeddings
            ego_embeddings = self.dropout[i](ego_embeddings)

            norm_embeddings = F.normalize(ego_embeddings, p=2, dim=1)
            all_embeddings += [norm_embeddings]

        all_embeddings = torch.cat(all_embeddings, dim=1)
        u_g_embeddings, i_g_embeddings = torch.split(
            all_embeddings, [self.n_users, self.n_items], dim=0
        )
        return u_g_embeddings, i_g_embeddings
Example #8
    def forward(self, inputs, adj):
        N = inputs.size()[0]
        ones = torch.ones(size=(N, 1), dtype=torch.float32)
        if self.is_cuda:
            ones = ones.cuda()
        adj_exp = torch.sparse_coo_tensor(adj.indices(),
                                          torch.exp(adj.values()),
                                          size=torch.Size((N, N)))
        inputs = torch.mul(inputs, self.W)  # todo: dot product
        # for relation weighting
        hidden = sparse.mm(adj_exp, inputs)
        # print('max', torch.max(adj_exp), 'min', torch.min(adj_exp))
        rowsum = sparse.mm(adj_exp, ones)
        # print('rowsum', rowsum.size())
        hidden = hidden.div(rowsum)
        # print('hidden: ', hidden.size())
        output = F.elu(hidden)
        return output
Example #9
    def forward(self, inputs, adj):
        N = inputs.size()[0]
        ones = torch.ones(size=(N, 1), dtype=torch.float32)
        if self.is_cuda:
            ones = ones.cuda()

        edge = adj.indices()
        h = torch.mul(inputs, self.W)  # todo: dot product

        # for relation weighting
        # h_prime2 = sparse.mm(adj, h)
        # adj_row_sum = torch.mm(adj, ones)
        # h_prime2 = h_prime2.div(adj_row_sum)

        assert not torch.isnan(h).any()
        edge_h = torch.cat((h[edge[0, :], :], h[edge[1, :], :]), dim=1).t()
        edge_e = torch.exp(self.leakyrelu(self.a.mm(edge_h).squeeze()))
        assert not torch.isnan(edge_e).any()

        # for relation weighting
        # edge_e = edge_e * adj.values()

        e_rowsum = sparse.mm(torch.sparse_coo_tensor(edge, edge_e), ones)
        h_prime = sparse.mm(torch.sparse_coo_tensor(edge, edge_e), h)

        assert not torch.isnan(h_prime).any()
        h_prime = h_prime.div(e_rowsum)
        assert not torch.isnan(h_prime).any()
        # h_prime = (h_prime2 + h_prime) / 2
        if self.concat:
            output = F.elu(h_prime)
        else:
            output = h_prime

        if self.residual:
            output = inputs + output
            assert output.size() == inputs.size()
        return output
Example #10
    def forward(self, edge_nodes, adj_matrix, inc_matrix, edge_feats):
        """
        :param edge_nodes: Matrix indicating the nodes which each edge in the
        batch connects. Shape [B, N]
        :param adj_matrix: Sparse adjacency matrix of the graph of shape
        [N, N]. Must contain only 1-entries (i.e. should not be normalised).
        :param inc_matrix: Sparse incidence matrix of the graph of shape
        [N, E].
        :param edge_feats: Features of *all* edges in the graph. Shape [E, D].
        :return: Hidden representation of shape [B, K].
        """
        # Get edges incident to the left and right nodes of each edge in the
        # batch. Result has shape [B, E].
        # In essence, it computes BxN * NxN * NxE
        # = edge_nodes * adj_matrix * inc_matrix.
        batch_edge_idcs = sp.mm(adj_matrix.transpose(1, 0),
                                edge_nodes.transpose(1, 0))
        batch_edge_idcs = sp.mm(inc_matrix.transpose(1, 0),
                                batch_edge_idcs).transpose(1, 0)
        # Find exactly those edges which are two "hops" away from the edge
        # in the batch
        batch_edge_idcs = (batch_edge_idcs == 2.0).float()
        # Normalise matrix row-wise such that edge features are averaged, not
        # summed.
        row_sum = torch.sum(batch_edge_idcs, dim=1)
        inv = 1.0 / row_sum
        inv[torch.isinf(inv)] = 0.0
        batch_edge_idcs = batch_edge_idcs * inv.view(-1, 1)

        # Compute hidden representations from edge_features
        h_edges = torch.mm(edge_feats, self.weight) + self.bias  # [E, K]

        # Obtain features of each of these edges
        h = torch.spmm(batch_edge_idcs, h_edges)  # [B, K]

        return h
Example #11
    def dot(self, other):
        """Wrapper of `torch.sparse.mm`. Perform tensor dot operation with
        another tensor, including vector inner product, matrix multiplication
        and general tensor dot.

        Parameters
        ----------
        other : DTensor or STensor
            The second operand of dot operation.

        Returns
        -------
        DTensor or STensor
            Tensor dot result, as a DTensor, an STensor, or a scalar value.
        """
        return tsparse.mm(self._data, other._data)
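A minimal illustration of what the wrapper delegates to; `s` and `d` below are plain PyTorch stand-ins for the `_data` of an STensor and a DTensor:

    import torch

    s = torch.eye(3).to_sparse()   # sparse operand
    d = torch.randn(3, 2)          # dense operand

    # torch.sparse.mm multiplies a sparse matrix by a dense one and
    # returns a dense result.
    out = torch.sparse.mm(s, d)
    assert out.shape == (3, 2)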
Example #12
    def sparse_mm_broadcasting(self, flattened_kernel, flattened_input):
        """
        :param flattened_kernel: Sparse matrix, size (m, n).
        :param flattened_input: Batched dense matrices, size (b, n, k).
        :return: The batched matrix-matrix product, size (m, n) x (b, n, k) = (b, m, k).
        """
        batch_size = flattened_input.shape[0]
        # Stack the vector batch into columns. (b, n, k) -> (n, b, k) -> (n, b*k)
        vectors = flattened_input.transpose(0, 1).reshape(
            flattened_kernel.shape[1], -1)

        # A matrix-matrix product is a batched matrix-vector product of the columns.
        # And then reverse the reshaping. (m, n) x (n, b*k) = (m, b*k) -> (m, b, k) -> (b, m, k)
        return sparse.mm(flattened_kernel,
                         vectors).reshape(flattened_kernel.shape[0],
                                          batch_size, -1).transpose(1, 0)
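The same flatten-multiply-unflatten trick as a standalone sketch, with made-up shapes:

    import torch

    m, n, b, k = 4, 5, 3, 2
    kernel = torch.eye(m, n).to_sparse()   # stand-in sparse (m, n) matrix
    batch = torch.randn(b, n, k)           # batched dense input

    # (b, n, k) -> (n, b*k): stack the batch into columns.
    cols = batch.transpose(0, 1).reshape(n, -1)
    # (m, n) x (n, b*k) = (m, b*k) -> (m, b, k) -> (b, m, k)
    out = torch.sparse.mm(kernel, cols).reshape(m, b, k).transpose(1, 0)
    assert out.shape == (b, m, k)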
Example #13
    def forward(self, x, adj):
        if x.is_sparse:
            wh = sparse.mm(x, self.weight).view(-1, self.nheads,
                                                self.out_features)
        else:
            x = F.dropout(x, p=self.dropout, training=self.training)
            wh = torch.mm(x, self.weight).view(-1, self.nheads,
                                               self.out_features)

        awh_i = (wh * self.linear_i).sum(dim=2)
        awh_j = (wh * self.linear_j).sum(dim=2)

        idx_i, idx_j = adj._indices()

        e_values = F.leaky_relu(awh_i[idx_i] + awh_j[idx_j],
                                negative_slope=self.alpha)

        e = sparse.FloatTensor(adj._indices(), e_values)
        a = sparse.softmax(e.cpu(), dim=1).to(e.device)

        # Choose memory / speed tradeoff
        # keep_sparse = True  : Loop through sparse tensor (Low memory usage / Slow)
        # keep_sparse = False : Convert sparse tensor to dense tensor (High memory usage / Fast)
        # Both methods return almost identical results
        keep_sparse = False

        if keep_sparse:
            x = torch.cat([(a[i]._values().unsqueeze(dim=2) *
                            wh[a[i]._indices()[0]]).sum(dim=0, keepdim=True)
                           for i in range(x.shape[0])],
                          dim=0)
        else:
            a = a.to_dense().unsqueeze(dim=3)
            wh = wh.unsqueeze(dim=0)
            x = (a * wh).sum(dim=1)

        if self.concat:
            return x.flatten(start_dim=1)
        else:
            return x.mean(dim=1)
Example #14
def ilr(p, basis):
    # Isometric log-ratio transform: project the log of each composition p
    # (one per row) onto the contrast basis, i.e. (basis @ log(p)^T)^T.
    return mm(basis, torch.log(p).T).T
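For shape intuition, a small sketch; the basis below is an arbitrary stand-in, not a proper ILR contrast matrix:

    import torch
    from torch.sparse import mm

    p = torch.tensor([[0.2, 0.3, 0.5]])   # one composition, shape (1, 3)
    basis = torch.eye(2, 3).to_sparse()   # stand-in basis, shape (2, 3)

    # (2, 3) @ (3, 1) -> (2, 1), then transpose back to (1, 2).
    out = mm(basis, torch.log(p).T).T
    assert out.shape == (1, 2)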
Example #15
    def forward(self):

        feature_u_drop = sparse_drop(self.feature_u,
                                     self.drop_out) / (1.0 - self.drop_out)
        feature_v_drop = sparse_drop(self.feature_v,
                                     self.drop_out) / (1.0 - self.drop_out)

        hidden_feature_u = []
        hidden_feature_v = []

        W_list = torch.split(self.W, self.rate_num)
        if self.use_GAT:
            adj_u_list = torch.split(self.adj_u, self.rate_num)
            adj_v_list = torch.split(self.adj_v, self.rate_num)
        W_flat = []
        for i in range(self.rate_num):
            Wr = W_list[0][i]
            if self.use_GAT:
                adj_u = adj_u_list[0][i]
                adj_v = adj_v_list[0][i]
                att_u = self._calculate_attention(self.side_feature_u, adj_u,
                                                  self.W_att_u)
                att_v = self._calculate_attention(self.side_feature_v, adj_v,
                                                  self.W_att_v)
            M_u = self.all_M_u[i]
            M_v = self.all_M_v[i]
            hidden_u = sp.mm(feature_v_drop, Wr)
            if self.use_GAT:
                hidden_u = torch.matmul(att_v, hidden_u)
            hidden_u = self.reLU(sp.mm(M_u, hidden_u))

            hidden_v = sp.mm(feature_u_drop, Wr)
            if self.use_GAT:
                hidden_v = torch.matmul(att_u, hidden_v)
            hidden_v = self.reLU(sp.mm(M_v, hidden_v))

            hidden_feature_u.append(hidden_u)
            hidden_feature_v.append(hidden_v)

            W_flat.append(Wr)

        hidden_feature_u = torch.cat(hidden_feature_u, dim=1)
        hidden_feature_v = torch.cat(hidden_feature_v, dim=1)
        W_flat = torch.cat(W_flat, dim=1)

        cat_u = torch.cat((hidden_feature_u, torch.mm(self.feature_u, W_flat)),
                          dim=1)
        cat_v = torch.cat((hidden_feature_v, torch.mm(self.feature_v, W_flat)),
                          dim=1)

        if self.use_side:
            side_hidden_feature_u = self.linear_layer_side_u(
                self.side_feature_u)
            side_hidden_feature_v = self.linear_layer_side_v(
                self.side_feature_v)

            cat_u = torch.cat((cat_u, side_hidden_feature_u), dim=1)
            cat_v = torch.cat((cat_v, side_hidden_feature_v), dim=1)

        embed_u = self.linear_cat_u(cat_u)
        embed_v = self.linear_cat_v(cat_v)

        score = []
        Q_list = torch.split(self.Q, self.rate_num)
        for i in range(self.rate_num):
            Qr = Q_list[0][i]

            tem = torch.mm(torch.mm(embed_u, Qr), torch.t(embed_v))  # corresponds to Eq. (12)

            score.append(tem)
        return torch.stack(score)
Example #16
    def forward(self):
        # dropout
        feature_u_drop = sparse_drop(self.feature_u, self.drop_out) / (
            1.0 - self.drop_out)  # (943,2625)
        feature_v_drop = sparse_drop(self.feature_v, self.drop_out) / (
            1.0 - self.drop_out)  # (1682,2625)

        hidden_feature_u = []
        hidden_feature_v = []

        # self.W (5,2625,5) -> (rate_num, feature_dim, hidden_dim)
        W_list = torch.split(self.W, self.rate_num)
        W_flat = []
        for i in range(self.rate_num):
            Wr = W_list[0][i]  # (2625,5)
            M_u = self.all_M_u[i]  # (943,1682)
            M_v = self.all_M_v[i]  # (1682,943)
            hidden_u = sp.mm(feature_v_drop,
                             Wr)  # (1682,2625) * (2625,5) -> (1682,5)
            hidden_u = self.reLU(sp.mm(M_u, hidden_u))  # (943,1682) * (1682,5)

            ### need to further process M, normalization
            hidden_v = sp.mm(feature_u_drop,
                             Wr)  # (943,2625) * (2625,5) -> (943,5)
            hidden_v = self.reLU(sp.mm(M_v, hidden_v))  # (1682,943) * (943,5)

            hidden_feature_u.append(hidden_u)
            hidden_feature_v.append(hidden_v)

            W_flat.append(Wr)  # store the weight matrix for each rating level

        hidden_feature_u = torch.cat(hidden_feature_u, dim=1)  # (943,25)
        hidden_feature_v = torch.cat(hidden_feature_v, dim=1)  # (1682,25)
        W_flat = torch.cat(W_flat, dim=1)  # (2625,25)

        cat_u = torch.cat(
            (hidden_feature_u, torch.mm(self.feature_u, W_flat)),
            dim=1)  # (943,50) = (943,25) + (943,2625) * (2625,25)
        cat_v = torch.cat(
            (hidden_feature_v, torch.mm(self.feature_v, W_flat)),
            dim=1)  # (1682,50) = (1682,25) + (1682,2625) * (2625,25)

        if self.use_side:
            side_hidden_feature_u = self.linear_layer_side_u(
                self.side_feature_u)
            side_hidden_feature_v = self.linear_layer_side_v(
                self.side_feature_v)

            cat_u = torch.cat((cat_u, side_hidden_feature_u), dim=1)
            cat_v = torch.cat((cat_v, side_hidden_feature_v), dim=1)

        embed_u = self.linear_cat_u(cat_u)  # nn.linear -> (943,50) -> (943,5)
        embed_v = self.linear_cat_v(
            cat_v)  # nn.linear -> (1682,50) -> (1682,5)

        score = []
        Q_list = torch.split(self.Q, self.rate_num)  # self.Q = (5,5,5), shared parameter matrices
        for i in range(self.rate_num):
            Qr = Q_list[0][i]  # (5,5)

            tem = torch.mm(
                torch.mm(embed_u, Qr),
                torch.t(embed_v))  # (943,5)*(5,5)*(5,1682) -> (943,1682)

            score.append(tem)
        return torch.stack(score)
Example #17
    def forward(self, adj, size):
        # One weight per index pair in `adj` (shape [2, nnz]).
        val = t.ones(adj.shape[1])
        x = sp.FloatTensor(adj, val, size=(size, size))
        x = sp.mm(x, self.weight)
        return x
Example #18
def graph_norm(X, laplacian):
    # Laplacian quadratic form X^T (L X): the Dirichlet energy of the
    # signal X on the graph, small when X varies smoothly over edges.
    norm = torch.mm(torch.transpose(X, 0, 1), tsps.mm(laplacian, X))
    return norm
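A quick sanity check of the quadratic form, assuming `tsps` is `torch.sparse`; for the single-edge graph below the energy equals the squared difference of the two node values:

    import torch
    import torch.sparse as tsps

    # Laplacian of one edge between nodes 0 and 1.
    L = torch.tensor([[1., -1.], [-1., 1.]]).to_sparse()
    X = torch.tensor([[0.], [3.]])

    print(graph_norm(X, L))   # tensor([[9.]]) == (0 - 3)^2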
Example #19
    def __call__(self, data):
        assert data.face is not None
        assert data.face_curvature is not None
        assert data.face_weight is not None

        # Prepare the initial local coordinate system
        device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
        # device = torch.device('cpu')
        norms = data.norm.to(device)
        positions = data.pos.to(device)
        faces = data.face.to(device)

        face_id = torch.arange(faces.shape[1], device=device)  # checked
        face0 = faces[0]  # checked
        face1 = faces[1]  # checked
        face2 = faces[2]  # checked

        weights = data.face_weight  # checked
        f_curv = data.face_curvature

        face0 = torch.stack((face_id, face0), dim=0).t()  # checked
        face1 = torch.stack((face_id, face1), dim=0).t()  # checked
        face2 = torch.stack((face_id, face2), dim=0).t()  # checked
        weights = data.face_weight * torch.ones(len(face0),
                                                dtype=torch.long).to(device)
        sparse_size = torch.Size((faces.shape[1], len(positions)))  # checked

        sparse_face0 = tsp.FloatTensor(
            torch.LongTensor(face0).t(), weights,
            sparse_size).to(device)  # .to_dense()  # checked
        sparse_face1 = tsp.FloatTensor(
            torch.LongTensor(face1).t(), weights,
            sparse_size).to(device)  # .to_dense()  # checked
        sparse_face2 = tsp.FloatTensor(
            torch.LongTensor(face2).t(), weights,
            sparse_size).to(device)  # .to_dense()  # checked

        weighted_faces = sparse_face0 + sparse_face1 + sparse_face2  # checked
        weighted_faces = weighted_faces.coalesce()

        # On older PyTorch this may need a cast to float (checked).
        weighted_faces = weighted_faces.t()
        node_curv = tsp.mm(weighted_faces, f_curv)
        sum_weights_per_node = tsp.sum(weighted_faces,
                                       dim=1).to_dense()  # checked
        node_curv = node_curv.t() / sum_weights_per_node  # checked
        node_curv = node_curv.t()
        eigs = []
        for i in node_curv:  # checked
            # torch.eig returns a namedtuple; column 0 of `eigenvalues` holds the real parts.
            eig = torch.eig(i.reshape(2, 2))
            principal_curvatures = eig.eigenvalues[:, 0].sort(descending=True).values
            eigs.append(principal_curvatures)
        eigs = torch.stack(eigs, dim=0)
        # Shape index: s = (2 / pi) * atan((k1 + k2) / (k1 - k2)) per node.
        s_s = eigs[:, 0] + eigs[:, 1]
        s_p = eigs[:, 0] - eigs[:, 1]
        s = s_s.div(s_p)
        pi = math.pi * torch.ones(len(positions)).to(device)
        s = (2 / pi) * torch.atan(s)

        data.shape_index = s
        if self.remove:
            data.face_curvature = None
            data.face_weight = None
            data.face_normals = None
        return data