    def forward(self, x, in_f):
        # x: point coordinates (B x N x 3), in_f: input features (B x N x d)
        points_diff = square_distance(x, x)
        knn_idx = points_diff.argsort()[:, :, :self.k]  # B x N x k
        points_knn = index_points(x, knn_idx)           # B x N x k x 3

        f = self.fc1(in_f)
        true_k = points_knn.shape[2]

        # Relative position encoding delta(x_i - x_j) for the k neighbours.
        delta = self.del_mlp2(
            F.relu(
                self.del_mlp1(
                    x.repeat((1, 1, true_k)).reshape(points_knn.shape) -
                    points_knn)))

        psi = index_points(self.psi_fc(f), knn_idx)
        phi = self.phi_fc(f).repeat((1, 1, true_k)).reshape(delta.shape)
        alpha = index_points(self.alpha_fc(f), knn_idx)

        # Dropout branch: an extra MLP on the centre features, broadcast to
        # every neighbour and randomly dropped.
        dropped_mlp = self.dropout(
            self.dpt_mlp2(F.relu(self.dpt_mlp1(f))).repeat(
                (1, 1, true_k)).reshape(delta.shape))

        # Vector attention weights over the k neighbours.
        gamma = self.gam_mlp2(
            F.relu(self.gam_mlp1(phi - psi + delta + dropped_mlp)))
        rho = F.softmax(gamma / (true_k**0.5), dim=2)

        y = torch.sum(rho * (alpha + delta), dim=2)  # aggregate over neighbours
        out_f = self.fc2(y)
        return out_f + in_f  # residual connection
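
All of these snippets lean on the PointNet++-style helpers square_distance and index_points, which are not reproduced here. A minimal sketch of both, assuming the usual definitions (the original utilities may differ in detail, e.g. a matmul-based distance):

import torch

def square_distance(src, dst):
    """Pairwise squared Euclidean distance between two point sets.

    src: B x N x C, dst: B x M x C -> B x N x M
    """
    return torch.sum((src[:, :, None] - dst[:, None]) ** 2, dim=-1)

def index_points(points, idx):
    """Gather points by index.

    points: B x N x C, idx: B x S (or B x S x K) -> B x S x C (or B x S x K x C)
    """
    batch_shape = list(idx.shape)
    batch_indices = torch.arange(points.shape[0], device=points.device)
    batch_indices = batch_indices.view(
        [-1] + [1] * (len(batch_shape) - 1)).expand(batch_shape)
    return points[batch_indices, idx]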
    def forward(self, xyz, x, dist_arg_sort):
        """Attention on the first g points of every layer."""
        # The first g points attend over every point in the cloud.
        g_idx, _ = torch.sort(dist_arg_sort[:, :self.g])  # b x g x n
        g_xyz = index_points(xyz, g_idx)
        gq = self.g_qs(x[:, :self.g])
        gk = index_points(self.g_ks(x), g_idx)
        gv = index_points(self.g_vs(x), g_idx)
        g_pos_enc = self.g_delta(xyz[:, :self.g, None] - g_xyz)
        g_attn = self.g_gamma(gq[:, :, None] - gk + g_pos_enc)
        g_attn = F.softmax(g_attn / np.sqrt(gk.size(-1)), dim=-2)
        g_res = torch.einsum('bmnf,bmnf->bmf', g_attn, gv + g_pos_enc)
        return g_res  # b x g x f
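
The forward above references submodules (g_qs, g_ks, g_vs, g_delta, g_gamma) whose construction is not shown. A possible __init__, modelled on a standard Point Transformer block; the class name GlobalAttention and the d_model/g parameter names are assumptions, not the original code:

import torch.nn as nn

class GlobalAttention(nn.Module):
    """Hypothetical constructor matching the forward pass above."""

    def __init__(self, d_model, g):
        super().__init__()
        self.g = g  # number of "global" points that attend over the whole cloud
        self.g_qs = nn.Linear(d_model, d_model, bias=False)
        self.g_ks = nn.Linear(d_model, d_model, bias=False)
        self.g_vs = nn.Linear(d_model, d_model, bias=False)
        # position-encoding MLP on coordinate differences (3 -> d_model)
        self.g_delta = nn.Sequential(
            nn.Linear(3, d_model), nn.ReLU(), nn.Linear(d_model, d_model))
        # MLP producing per-channel (vector) attention logits
        self.g_gamma = nn.Sequential(
            nn.Linear(d_model, d_model), nn.ReLU(), nn.Linear(d_model, d_model))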
Example #3
    def forward(self, xyz, features):
        dists = square_distance(xyz, xyz)
        knn_idx = dists.argsort()[:, :, :self.k]  # b x n x k
        knn_xyz = index_points(xyz, knn_idx)
        
        pre = features
        x = self.fc1(features)
        q = self.w_qs(x)
        k = index_points(self.w_ks(x), knn_idx)
        v = index_points(self.w_vs(x), knn_idx)

        pos_enc = self.fc_delta(xyz[:, :, None] - knn_xyz)  # b x n x k x f
        
        attn = self.fc_gamma(q[:, :, None] - k + pos_enc)
        attn = F.softmax(attn / np.sqrt(k.size(-1)), dim=-2)  # b x n x k x f
        
        res = torch.einsum('bmnf,bmnf->bmf', attn, v + pos_enc)
        res = self.fc2(res) + pre
        return res, attn
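
The layers fc1, fc2, w_qs, w_ks, w_vs, fc_delta and fc_gamma are not defined in this snippet. A hedged sketch of a matching constructor, patterned after the widely used Point Transformer implementation; the class name TransformerBlock and the d_points/d_model/k arguments are illustrative assumptions, with the forward above serving as the class's forward method:

import torch.nn as nn

class TransformerBlock(nn.Module):
    """Hypothetical constructor for the layer in Example #3."""

    def __init__(self, d_points, d_model, k):
        super().__init__()
        self.k = k
        self.fc1 = nn.Linear(d_points, d_model)
        self.fc2 = nn.Linear(d_model, d_points)
        self.w_qs = nn.Linear(d_model, d_model, bias=False)
        self.w_ks = nn.Linear(d_model, d_model, bias=False)
        self.w_vs = nn.Linear(d_model, d_model, bias=False)
        # position-encoding MLP on coordinate differences (3 -> d_model)
        self.fc_delta = nn.Sequential(
            nn.Linear(3, d_model), nn.ReLU(), nn.Linear(d_model, d_model))
        # MLP producing per-channel attention logits
        self.fc_gamma = nn.Sequential(
            nn.Linear(d_model, d_model), nn.ReLU(), nn.Linear(d_model, d_model))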
Example #4
def sample_and_group(npoint, nsample, xyz, points):
    B, N, C = xyz.shape
    S = npoint

    # Farthest point sampling picks npoint well-spread centroids.
    fps_idx = farthest_point_sample(xyz, npoint)  # [B, npoint]

    new_xyz = index_points(xyz, fps_idx)
    new_points = index_points(points, fps_idx)

    # Group the nsample nearest neighbours of each centroid.
    dists = square_distance(new_xyz, xyz)  # B x npoint x N
    idx = dists.argsort()[:, :, :nsample]  # B x npoint x K

    grouped_points = index_points(points, idx)
    grouped_points_norm = grouped_points - new_points.view(B, S, 1, -1)

    # Concatenate the centred neighbour features with the centroid features.
    new_points = torch.cat(
        [grouped_points_norm,
         new_points.view(B, S, 1, -1).repeat(1, 1, nsample, 1)],
        dim=-1)
    return new_xyz, new_points
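
sample_and_group also depends on farthest_point_sample. A minimal sketch, assuming the standard iterative farthest point sampling routine from the PointNet++ codebase:

import torch

def farthest_point_sample(xyz, npoint):
    """Iterative farthest point sampling (sketch of the usual routine).

    xyz: B x N x 3 -> sampled indices B x npoint
    """
    B, N, _ = xyz.shape
    centroids = torch.zeros(B, npoint, dtype=torch.long, device=xyz.device)
    distance = torch.full((B, N), 1e10, device=xyz.device)
    # start from an arbitrary (here random) point in each cloud
    farthest = torch.randint(0, N, (B,), device=xyz.device)
    batch_indices = torch.arange(B, device=xyz.device)
    for i in range(npoint):
        centroids[:, i] = farthest
        centroid = xyz[batch_indices, farthest].unsqueeze(1)  # B x 1 x 3
        dist = torch.sum((xyz - centroid) ** 2, dim=-1)       # B x N
        distance = torch.minimum(distance, dist)
        # next centroid is the point farthest from all chosen so far
        farthest = torch.max(distance, dim=-1)[1]
    return centroids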
Example #5
    def forward(self, xyz, features):
        dists = square_distance(xyz, xyz)
        dist_arg_sort = dists.argsort()
        knn_idx = dist_arg_sort[:, :, :self.k + 1]  # b x n x (k+1)
        if self.global_attn:
            # Clone so the in-place edit below does not write through the
            # slice view and corrupt dist_arg_sort, which is reused later.
            knn_idx = knn_idx.clone()
            # Replace the farthest neighbour with point 0 (the global anchor)
            # and roll it to the front of every neighbour list.
            knn_idx[:, :, -1] = 0
            knn_idx = torch.roll(knn_idx, 1, -1)
        knn_xyz = index_points(xyz, knn_idx)

        pre = features
        x = self.fc1(features)

        k_proj, v_proj = self.w_ks(x), self.w_vs(x)
        q = self.w_qs(x)
        k = index_points(k_proj, knn_idx)
        v = index_points(v_proj, knn_idx)
        pos_enc = self.fc_delta(xyz[:, :, None] - knn_xyz)
        attn = self.fc_gamma(q[:, :, None] - k + pos_enc)

        attn = F.softmax(attn / np.sqrt(k.size(-1)), dim=-2)
        res = torch.einsum('bmnf,bmnf->bmf', attn, v + pos_enc)

        if self.global_attn and self.share_params:
            # Global attention for the first g points with shared parameters:
            # reuse the q/k/v projections and let each of these points attend
            # over all N points.
            g_idx = torch.sort(dist_arg_sort[:, :self.g])[0]
            g_xyz = index_points(xyz, g_idx)
            gq = q[:, :self.g]
            gk = index_points(k_proj, g_idx)
            gv = index_points(v_proj, g_idx)
            pos_enc = self.fc_delta(xyz[:, :self.g, None] - g_xyz)
            g_attn = self.fc_gamma(gq[:, :, None] - gk + pos_enc)
            g_attn = F.softmax(g_attn / np.sqrt(gk.size(-1)), dim=-2)
            g_res = torch.einsum('bmnf,bmnf->bmf', g_attn, gv + pos_enc)
            res[:, :self.g] = g_res  # B x g x D
        elif self.global_attn:
            # Global attention with a separate (non-shared) parameter set.
            g_res = self.g_attn(xyz=xyz, x=x,
                                dist_arg_sort=dist_arg_sort)  # B x g x D
            res[:, :self.g] = g_res
        elif self.sparse_mat:
            # Caution: this may cause CUDA OOM errors; it needs memory for a
            # 3 x B x N x N x D tensor. Set self.sparse_mat to True manually.
            res = build_sparse_matrix_with_global_attention(
                attn, knn_idx, k, v, pos_enc)

        res = self.fc2(res) + pre
        return res
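
For orientation, a usage sketch tying the pieces together; it assumes the hypothetical TransformerBlock from Example #3 has the forward shown there attached as its forward method, and all names and shapes are purely illustrative:

import torch

# illustrative shapes: batch of 8 clouds with 1024 points and 32-dim features
xyz = torch.rand(8, 1024, 3)
features = torch.rand(8, 1024, 32)

# downsample to 256 centroids, grouping 16 neighbours each
new_xyz, new_points = sample_and_group(npoint=256, nsample=16,
                                       xyz=xyz, points=features)
print(new_xyz.shape, new_points.shape)  # (8, 256, 3), (8, 256, 16, 64)

# run one transformer block on the full-resolution features
block = TransformerBlock(d_points=32, d_model=64, k=16)
out, attn = block(xyz, features)
print(out.shape)  # (8, 1024, 32)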