def forward(self, xyz, feats, new_xyz=None):
    '''
    :param xyz: B, N, 3 point coordinates
    :param feats: B, C_in, N point features
    :param new_xyz: B, npoint, 3 optional pre-sampled centroids
    :return: new_feats: B, C_out, npoint
    '''
    if new_xyz is None:
        assert self.npoint is not None
        xyz_flipped = xyz.transpose(1, 2).contiguous()  # B, 3, N
        idx = pointnet2_utils.furthest_point_sample(xyz, self.npoint)  # B, npoint
        new_xyz_flipped = pointnet2_utils.gather_operation(xyz_flipped, idx)  # B, 3, npoint
        new_xyz = new_xyz_flipped.transpose(1, 2).contiguous()  # B, npoint, 3

    idx = pointnet2_utils.ball_query(self.radius, self.nsample, xyz, new_xyz)  # B, npoint, nsample
    gped_feats = pointnet2_utils.grouping_operation(feats, idx)  # B, C, npoint, nsample
    gped_feats = F.max_pool2d(gped_feats, kernel_size=[1, self.nsample])  # B, C, npoint, 1
    gped_feats = gped_feats.squeeze(-1)  # B, C, npoint
    return self.conv(gped_feats)
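# A hedged, extension-free sketch of what the sampling/grouping above computes:
# farthest point sampling, ball grouping, and per-group max pooling. The CUDA ops
# in pointnet2_utils are the actual implementation; the function names below are
# illustrative only, and this ball query keeps the *nearest* nsample points within
# the radius rather than the first nsample found, so results are close but not
# bit-identical.
import torch


def furthest_point_sample_reference(xyz: torch.Tensor, npoint: int) -> torch.Tensor:
    """xyz: (B, N, 3) -> sampled indices (B, npoint)."""
    B, N, _ = xyz.shape
    idx = torch.zeros(B, npoint, dtype=torch.long, device=xyz.device)
    dist = torch.full((B, N), float("inf"), device=xyz.device)
    farthest = torch.zeros(B, dtype=torch.long, device=xyz.device)
    batch = torch.arange(B, device=xyz.device)
    for i in range(npoint):
        idx[:, i] = farthest
        centroid = xyz[batch, farthest].unsqueeze(1)                 # (B, 1, 3)
        dist = torch.minimum(dist, ((xyz - centroid) ** 2).sum(-1))  # track distance to nearest pick
        farthest = dist.argmax(dim=1)                                # next pick: farthest remaining point
    return idx


def group_and_maxpool_reference(xyz, feats, new_xyz, radius, nsample):
    """xyz: (B, N, 3), feats: (B, C, N), new_xyz: (B, np, 3) -> pooled feats (B, C, np)."""
    d2 = torch.cdist(new_xyz, xyz) ** 2                              # (B, np, N)
    grp = d2.argsort(dim=-1)[..., :nsample]                          # (B, np, nsample) nearest candidates
    in_ball = torch.gather(d2, -1, grp) <= radius ** 2
    grp = torch.where(in_ball, grp, grp[..., :1])                    # pad misses with the closest point
    gped = torch.gather(
        feats.unsqueeze(2).expand(-1, -1, grp.shape[1], -1), 3,
        grp.unsqueeze(1).expand(-1, feats.shape[1], -1, -1))         # (B, C, np, nsample)
    return gped.max(dim=-1).values                                   # (B, C, np)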
def index_points_group(points, knn_idx):
    """
    Input:
        points: input points data, [B, N, C]
        knn_idx: sample index data, [B, N, K]
    Return:
        new_points: indexed points data, [B, N, K, C]
    """
    points_flipped = points.permute(0, 2, 1).contiguous()  # B, C, N
    new_points = pointnet2_utils.grouping_operation(
        points_flipped, knn_idx.int()).permute(0, 2, 3, 1)  # B, N, K, C
    return new_points
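# For reference, the same indexing can be written in pure PyTorch with torch.gather.
# This is only a sketch to document what grouping_operation returns here; it assumes
# points of shape (B, N, C) and knn_idx of shape (B, M, K), matching the docstring
# above, and the function name is illustrative, not part of pointnet2_utils.
import torch


def index_points_group_reference(points: torch.Tensor, knn_idx: torch.Tensor) -> torch.Tensor:
    B, N, C = points.shape
    _, M, K = knn_idx.shape
    # Expand points to (B, M, N, C) and the indices to (B, M, K, C), then gather
    # the K neighbours of each of the M query points along the point dimension.
    idx = knn_idx.long().unsqueeze(-1).expand(-1, -1, -1, C)   # (B, M, K, C)
    expanded = points.unsqueeze(1).expand(-1, M, -1, -1)       # (B, M, N, C)
    return torch.gather(expanded, 2, idx)                      # (B, M, K, C)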
def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor,
            features: torch.Tensor = None) -> Tuple[torch.Tensor]:
    """
    :param xyz: (B, N, 3) xyz coordinates of the features
    :param new_xyz: (B, npoint, 3) centroids
    :param features: (B, C, N) descriptors of the features
    :return: new_features: (B, 3 + C_gt + C, npoint, nsample) when use_xyz is True
    """
    idx = ball_query(self.radius, self.nsample, xyz, new_xyz)
    xyz_trans = xyz.transpose(1, 2).contiguous()
    grouped_xyz = grouping_operation(xyz_trans, idx)  # (B, 3, npoint, nsample)
    grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1)  # translate to each centroid's local frame

    gtfeatures = get_gt_feature(xyz, new_xyz,
                                grouped_xyz.permute(0, 2, 3, 1).contiguous(),
                                self.radius, self.nsample).transpose(1, 2)  # (B, C_gt, npoint), e.g. 8 x 42 x 4096
    gtfeatures = gtfeatures.unsqueeze(-1).expand(-1, -1, -1, self.nsample)  # (B, C_gt, npoint, nsample)

    if features is not None:
        grouped_features = grouping_operation(features, idx)  # (B, C, npoint, nsample)
        if self.use_xyz:
            new_features = torch.cat([grouped_xyz, gtfeatures, grouped_features],
                                     dim=1)  # (B, 3 + C_gt + C, npoint, nsample)
        else:
            new_features = torch.cat([gtfeatures, grouped_features],
                                     dim=1)  # (B, C_gt + C, npoint, nsample)
    else:
        assert self.use_xyz, "Cannot have no features and not use xyz as a feature!"
        new_features = torch.cat([grouped_xyz, gtfeatures],
                                 dim=1)  # (B, 3 + C_gt, npoint, nsample)

    return new_features
def forward(self, point_cloud):
    dist, idx = self.KNN(point_cloud, point_cloud)
    '''
    idx is batch_size, k, n_points
    point_cloud is batch_size, n_dims, n_points
    point_cloud_neighbors is batch_size, n_dims, k, n_points
    '''
    idx = idx[:, 1:, :]  # drop the self-match returned by KNN
    point_cloud_neighbors = grouping_operation(point_cloud, idx.contiguous().int())
    point_cloud_central = point_cloud.unsqueeze(2).repeat(1, 1, self.k, 1)  # B, n_dims, k, n_points
    # print(point_cloud_central.shape, point_cloud_neighbors.shape)
    edge_feature = torch.cat(
        [point_cloud_central, point_cloud_neighbors - point_cloud_central], dim=1)
    return edge_feature, idx
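# A hedged, pure-PyTorch sketch of the same DGCNN-style edge-feature construction,
# using torch.cdist / topk in place of the KNN CUDA module. `k` plays the role of
# self.k above; the function name is illustrative only.
import torch


def edge_feature_reference(point_cloud: torch.Tensor, k: int):
    """point_cloud: (B, C, N) -> edge_feature: (B, 2C, k, N), idx: (B, k, N)."""
    pts = point_cloud.transpose(1, 2)                                  # (B, N, C)
    dist = torch.cdist(pts, pts)                                       # (B, N, N)
    # k+1 nearest neighbours per point, then drop the self-match in column 0.
    idx = dist.topk(k + 1, dim=-1, largest=False).indices[:, :, 1:]    # (B, N, k)
    B, N, _ = idx.shape
    neighbors = torch.gather(
        pts.unsqueeze(1).expand(-1, N, -1, -1), 2,
        idx.unsqueeze(-1).expand(-1, -1, -1, pts.shape[-1]))           # (B, N, k, C)
    neighbors = neighbors.permute(0, 3, 2, 1)                          # (B, C, k, N)
    central = point_cloud.unsqueeze(2).expand(-1, -1, k, -1)           # (B, C, k, N)
    edge_feature = torch.cat([central, neighbors - central], dim=1)    # (B, 2C, k, N)
    return edge_feature, idx.permute(0, 2, 1)                          # idx as (B, k, N)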
def get_repulsion_loss(self, pred):
    _, idx = knn_point(self.nn_size, pred, pred, transpose_mode=True)
    idx = idx[:, :, 1:].to(torch.int32)  # remove first one
    idx = idx.contiguous()  # B, N, nn
    pred = pred.transpose(1, 2).contiguous()  # B, 3, N
    grouped_points = pn2_utils.grouping_operation(
        pred, idx)  # (B, 3, N), (B, N, nn) => (B, 3, N, nn)
    grouped_points = grouped_points - pred.unsqueeze(-1)

    dist2 = torch.sum(grouped_points ** 2, dim=1)
    dist2 = torch.max(dist2, torch.tensor(self.eps).cuda())
    dist = torch.sqrt(dist2)
    weight = torch.exp(-dist2 / self.h ** 2)
    uniform_loss = torch.mean((self.radius - dist) * weight)
    # uniform_loss = torch.mean(self.radius - dist * weight)  # punet
    return uniform_loss
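# A hedged, extension-free sketch of the same repulsion term: for each point, take
# its nn_size - 1 nearest neighbours and average (radius - d) * exp(-d^2 / h^2).
# nn_size, radius, h and eps mirror the attributes used above; the default values
# and the function name here are placeholders, not taken from this code.
import torch


def repulsion_loss_reference(pred: torch.Tensor, nn_size: int = 5,
                             radius: float = 0.07, h: float = 0.03,
                             eps: float = 1e-12) -> torch.Tensor:
    """pred: (B, N, 3) point cloud."""
    dist2 = torch.cdist(pred, pred) ** 2                              # (B, N, N)
    # nn_size smallest squared distances per point; column 0 is the point itself.
    dist2 = dist2.topk(nn_size, dim=-1, largest=False).values[:, :, 1:]
    dist2 = dist2.clamp_min(eps)
    dist = dist2.sqrt()
    weight = torch.exp(-dist2 / h ** 2)
    return torch.mean((radius - dist) * weight)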
def get_uniform_loss(self, pcd, percentage=[0.004, 0.006, 0.008, 0.010, 0.012], radius=1.0):
    B, N, C = pcd.shape[0], pcd.shape[1], pcd.shape[2]
    npoint = int(N * 0.05)
    loss = 0
    further_point_idx = pn2_utils.furthest_point_sample(
        pcd.permute(0, 2, 1).contiguous(), npoint)
    new_xyz = pn2_utils.gather_operation(
        pcd.permute(0, 2, 1).contiguous(), further_point_idx)  # B, C, npoint

    for p in percentage:
        nsample = int(N * p)
        r = math.sqrt(p * radius)
        disk_area = math.pi * (radius ** 2) / N
        idx = pn2_utils.ball_query(
            r, nsample, pcd.contiguous(),
            new_xyz.permute(0, 2, 1).contiguous())  # B, npoint, nsample
        expect_len = math.sqrt(disk_area)

        grouped_pcd = pn2_utils.grouping_operation(
            pcd.permute(0, 2, 1).contiguous(), idx)  # B, C, npoint, nsample
        grouped_pcd = grouped_pcd.permute(0, 2, 3, 1)  # B, npoint, nsample, C
        grouped_pcd = torch.cat(torch.unbind(grouped_pcd, dim=1), dim=0)  # B*npoint, nsample, C

        dist, _ = self.knn_uniform(grouped_pcd, grouped_pcd)
        # print(dist.shape)
        uniform_dist = dist[:, :, 1:]  # B*npoint, nsample, 1
        uniform_dist = torch.abs(uniform_dist + 1e-8)
        uniform_dist = torch.mean(uniform_dist, dim=1)
        uniform_dist = (uniform_dist - expect_len) ** 2 / (expect_len + 1e-8)

        mean_loss = torch.mean(uniform_dist)
        mean_loss = mean_loss * math.pow(p * 100, 2)
        loss += mean_loss

    return loss / len(percentage)
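# Worked example of the target spacing used above: with N points assumed roughly
# uniform on a patch of radius `radius`, each point owns an area of pi * radius^2 / N,
# so the expected nearest-neighbour spacing is the square root of that area. The
# numbers below (N = 4096) are illustrative only.
import math

N, radius = 4096, 1.0
for p in [0.004, 0.006, 0.008, 0.010, 0.012]:
    r = math.sqrt(p * radius)                            # query-ball radius for this percentage
    nsample = int(N * p)                                 # expected points inside that ball
    expect_len = math.sqrt(math.pi * radius ** 2 / N)    # target neighbour spacing
    print(f"p={p:.3f}  r={r:.3f}  nsample={nsample}  expect_len={expect_len:.4f}")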