Example #1
    def knn_search(support_pts, query_pts, k):
        """
        :param support_pts: points you already have, B*N1*3
        :param query_pts: points whose neighbour indices you want, B*N2*3
        :param k: number of neighbours for the KNN search
        :return: neighbor_idx: indices of the neighbouring points, B*N2*k
        """

        neighbor_idx = nearest_neighbors.knn_batch(support_pts,
                                                   query_pts,
                                                   k,
                                                   omp=True)
        return neighbor_idx.astype(np.int32)
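A minimal usage sketch for the helper above; random float32 clouds stand in for real data, and the shapes follow the docstring:

import numpy as np
import nearest_neighbors

support = np.random.rand(2, 1024, 3).astype(np.float32)  # B=2, N1=1024
query = np.random.rand(2, 256, 3).astype(np.float32)     # B=2, N2=256

# the same call the method wraps: one index row per query point
idx = nearest_neighbors.knn_batch(support, query, 4, omp=True).astype(np.int32)
print(idx.shape)  # expected: (2, 256, 4)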
Example #2
def nearest_neighbor_interpolate(unknown, known, known_feats):
    """
    :param unknown: (n, 4) tensor of the bxyz positions of the unknown features
    :param known: (m, 4) tensor of the bxyz positions of the known features
    :param known_feats: (m, C) SparseConvTensor of features to be propagated
    :return:
        new_features: (n, C) tensor of the features of the unknown features
    """
    batch_size = known_feats.batch_size
    features = known_feats.features
    channel = features.shape[-1]
    ind = torch.zeros(unknown.shape[0], 1)
    offsetq = 0
    offsets = 0

    for b in range(batch_size):
        support_points = known[known[:, 0] == b][:, 1:].unsqueeze(0).detach(
        ).cpu().numpy()  # [1, N, 3]
        query_points = unknown[unknown[:, 0] == b][:, 1:].unsqueeze(0).detach(
        ).cpu().numpy()  # [1, M, 3]
        indices = nearest_neighbors.knn_batch(support_points,
                                              query_points,
                                              1,
                                              omp=True)

        num_spoint = support_points.shape[1]
        num_qpoint = query_points.shape[1]
        ind[offsetq:offsetq + num_qpoint] = torch.Tensor(indices +
                                                         offsets).squeeze(0)

        offsets += num_spoint
        offsetq += num_qpoint
    ind = ind.long()
    interpolated_feat = features[ind].view(-1, channel)

    return interpolated_feat.contiguous()
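A rough usage sketch, assuming known_feats only needs .batch_size and .features here, so a SimpleNamespace stands in for the spconv tensor. Note that the function fills ind sequentially per batch, so the bxyz rows must be grouped by batch index:

import torch
from types import SimpleNamespace

B, m, n, C = 2, 128, 512, 16
# batch ids sorted so each batch occupies a contiguous block of rows
bid_known = torch.repeat_interleave(torch.arange(B), m // B).float().unsqueeze(1)
bid_unknown = torch.repeat_interleave(torch.arange(B), n // B).float().unsqueeze(1)
known = torch.cat([bid_known, torch.rand(m, 3)], 1)      # (m, 4) bxyz
unknown = torch.cat([bid_unknown, torch.rand(n, 3)], 1)  # (n, 4) bxyz
known_feats = SimpleNamespace(batch_size=B, features=torch.rand(m, C))

feats = nearest_neighbor_interpolate(unknown, known, known_feats)
print(feats.shape)  # expected: torch.Size([512, 16])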
Example #3
    def collate_batch(self, batch_list, _unused=False):

        data_dict = defaultdict(list)
        for cur_sample in batch_list:
            for key, val in cur_sample.items():
                data_dict[key].append(val)
        batch_size = len(batch_list)
        ret = {}
        image_idx = data_dict['frame_id']
        calib = data_dict['calib']
        for key, val in data_dict.items():
            try:
                if key in ['image']:
                    ret[key] = np.concatenate(val, axis=0)
                elif key in ['points']:
                    num = [coor.shape[0] for coor in val]
                    # sample every point cloud to the largest count in the batch
                    num_ = np.max(num)
                    coors_batch = []
                    for i, coor in enumerate(val):
                        p, _ = sample_points(coor, coor, num_)
                        coors_batch.append(p)
                    k = key + '_batch'
                    ret[k] = np.array(coors_batch).reshape(
                        (len(num), num_, -1))
                elif key in ['raw_points']:
                    if not self.raw:  # don't need to load raw data
                        num = [coor.shape[0] for coor in val]
                        # sample raw points to the largest count in the batch
                        num_ = np.max(num)
                        coors_batch = []
                        for i, coor in enumerate(val):
                            p, _ = sample_points(coor, coor, num_)
                            if self.use_color:
                                img_path = '/media/ddd/data2/3d_MOTS_Ex./Code/OpenPCDet-RandlaNet/data/kitti/training/image_2/' + \
                                           image_idx[i] + '.png'
                                img = cv2.imread(img_path)
                                image = np.float32(img)
                                if not self.use_rgb:
                                    img = cv2.cvtColor(image,
                                                       cv2.COLOR_BGR2Lab)
                                p = p[:, :3]
                                calib_result = calib[i].lidar_to_img(
                                    p)  # project [N, 3] lidar points to [N, 2] image coords
                                p = self.painted_point_cloud(
                                    calib_result, img, p)
                            coors_batch.append(p)
                        k = key + '_batch'
                        ret[k] = np.array(coors_batch).reshape(
                            (len(num), num_,
                             -1))  # if colored, the returned points carry color features
                elif key in ['gt_boxes']:
                    max_gt = max([len(x) for x in val])
                    batch_gt_boxes3d = np.zeros(
                        (batch_size, max_gt, val[0].shape[-1]),
                        dtype=np.float32)
                    for b in range(batch_size):
                        batch_gt_boxes3d[b, :len(val[b]), :] = val[b]
                    ret[key] = batch_gt_boxes3d
                elif key in [
                        'voxel_features', 'voxels', 'voxel_num_points',
                        'voxel_coords'
                ]:
                    continue
                else:
                    ret[key] = np.stack(val, axis=0)
            except Exception:
                print('image_idx:', image_idx)
                img_path = '/media/ddd/data2/3d_MOTS_Ex./Code/OpenPCDet-RandlaNet/data/kitti/training/image_2/' + \
                           image_idx[0] + '.png'
                print(img_path)
                print('Error in collate_batch: key=%s' % key)
                raise TypeError('Error in collate_batch: key=%s' % key)

        val = data_dict['voxel_features']
        voxel_coords = data_dict['voxel_coords']
        num = [coor.shape[0] for coor in val]
        num_ = np.max(num)
        fea = []
        cor = []
        for i, coor in enumerate(val):
            p, c = sample_points(coor, voxel_coords[i], num_)
            if self.use_color:
                img_path = '/media/ddd/data2/3d_MOTS_Ex./Code/OpenPCDet-RandlaNet/data/kitti/training/image_2/' + \
                           image_idx[i] + '.png'
                img = cv2.imread(img_path)
                image = np.float32(img)
                if not self.use_rgb:
                    img = cv2.cvtColor(image, cv2.COLOR_BGR2Lab)
                p = p[:, :3]
                calib_result = calib[i].lidar_to_img(
                    p)  # project [N, 3] lidar points to [N, 2] image coords
                p = self.painted_point_cloud(calib_result, img, p)
            fea.append(p)
            cor.append(c)

        ret['voxel_features_batch'] = np.array(fea).reshape(
            (len(num), num_, -1))  # if colored, the returned points carry color features
        ret['voxel_coords_batch'] = np.array(cor).reshape((len(num), num_, -1))
        val = ret['voxel_coords_batch']
        coors = []
        for i, coor in enumerate(val):
            # prepend the batch index i as an extra leading column
            coor_pad = np.pad(coor, ((0, 0), (1, 0)),
                              mode='constant',
                              constant_values=i)
            coors.append(coor_pad)
        ret['voxel_coords_batch'] = np.concatenate(coors, axis=0)
        voxel_features_batch = ret['voxel_features_batch'][:, :, :3]

        if self.nbg:  # if True, search neighbours among raw points that still include background points
            raw_points_batch = ret['raw_points_batch'][:, :, :3]
            neighbor_idx = nearest_neighbors.knn_batch(raw_points_batch,
                                                       voxel_features_batch,
                                                       8,
                                                       omp=True)
        else:
            neighbor_idx = nearest_neighbors.knn_batch(voxel_features_batch,
                                                       voxel_features_batch,
                                                       8,
                                                       omp=True)

        ret['neighbor'] = neighbor_idx
        ret['batch_size'] = batch_size

        return ret
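collate_batch leans on a sample_points(points, coords, target_num) helper that is not shown in this snippet. Judging from how it is called, it resizes both arrays to exactly target_num rows; a hypothetical sketch of that behaviour:

import numpy as np

def sample_points(points, coords, target_num):
    """Hypothetical stand-in: subsample or pad (points, coords) to target_num rows."""
    n = points.shape[0]
    if n >= target_num:
        choice = np.random.choice(n, target_num, replace=False)
    else:
        # too few points: keep them all and duplicate random rows to pad
        extra = np.random.choice(n, target_num - n, replace=True)
        choice = np.concatenate([np.arange(n), extra])
    return points[choice], coords[choice]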
Example #4
    def forward(self, feat1, feat2):
        '''feat1 and feat2 are spconv SparseConvTensors; returns a dense tensor'''
        device = feat1.features.device
        spatial_shape = feat2.spatial_shape
        indices = feat2.indices

        batch_size = feat1.batch_size
        coord1, features1 = extract_coord_features(feat1.dense().detach())
        coord2, features2 = extract_coord_features(feat2.dense())
        xyz1 = align_pnt(coord1, self.voxelsize,
                         self.point_range)  # [0, -25.6, -2, 51.2, 25.6, 4.4]
        xyz2 = align_pnt(coord2, self.voxelsize * 2, self.point_range)
        ind = torch.zeros(features2.shape[0], self.k)
        offsets = 0
        offsetq = 0

        for b in range(batch_size):
            support_points = xyz1[xyz1[:, 0] == b][:, 1:].unsqueeze(0).detach(
            ).cpu().numpy()  # [1, N, 3]
            query_points = xyz2[xyz2[:, 0] == b][:, 1:].unsqueeze(0).detach(
            ).cpu().numpy()  # [1, M, 3]
            indices = nearest_neighbors.knn_batch(support_points,
                                                  query_points,
                                                  self.k,
                                                  omp=True)
            assert len(indices[0]) == len(query_points[0])

            num_spoint = support_points.shape[1]
            num_qpoint = query_points.shape[1]
            ind[offsetq:offsetq +
                num_qpoint] = torch.Tensor(indices + offsets).squeeze(0)

            offsets += num_spoint
            offsetq += num_qpoint

        ind = ind.long().to(device)
        group, _ = VoxelPooling.index_feat(
            torch.cat([xyz1[:, 1:].float(), features1], 1), ind)
        group_xyz, group_features = group[:, :, :3], group[:, :, 3:]  # [N, K, 3], [N, K, D]

        relation = VoxelPooling.relation_position(
            group_xyz, xyz2[:, 1:].float())  # [N, K, 10]
        if self.feat_relation:
            relation = torch.cat([
                relation, group_features,
                features2.unsqueeze(1).repeat((1, self.k, 1))
            ], -1)
        group_w = self.relation_w(relation.permute(0, 2, 1))
        group_features = group_features.permute(0, 2, 1)

        group_features *= group_w
        updated_features = torch.mean(group_features, 2)
        features2 = self.fuse_mlps(torch.cat([updated_features, features2], 1))
        feat = spconv.SparseConvTensor(features2.float(), indices.int(),
                                       spatial_shape, batch_size)

        feat.indice_dict = feat2.indice_dict
        feat.grid = feat2.grid

        return feat.dense()
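forward relies on align_pnt to turn integer voxel indices back into metric coordinates. A hypothetical sketch, assuming coords come as (batch, ix, iy, iz) rows and point_range is [x_min, y_min, z_min, x_max, y_max, z_max] as in the inline comment:

import torch

def align_pnt(coords, voxel_size, point_range):
    """Hypothetical: map (M, 4) [b, ix, iy, iz] voxel indices to (M, 4) [b, x, y, z] voxel-centre coords."""
    origin = torch.tensor(point_range[:3],
                          dtype=torch.float32,
                          device=coords.device)
    xyz = (coords[:, 1:].float() + 0.5) * voxel_size + origin
    return torch.cat([coords[:, :1].float(), xyz], 1)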
Example #5
import numpy as np
#import lib.python.nearest_neighbors as nearest_neighbors
import nearest_neighbors
import time

batch_size = 16
num_points = 81920
K = 16
pc = np.random.rand(batch_size, num_points, 3).astype(np.float32)

# time a batched KNN query of every point against its own cloud
start = time.time()
neigh_idx = nearest_neighbors.knn_batch(pc, pc, K, omp=True)
print('knn_batch took %.3f s' % (time.time() - start))
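A quick sanity check, assuming SciPy is available: brute-force one batch element with cKDTree and compare the index sets (tie ordering may differ, hence the sort):

from scipy.spatial import cKDTree

tree = cKDTree(pc[0])
_, ref_idx = tree.query(pc[0][:8], k=K)  # reference answer for the first 8 queries of batch 0
print(np.array_equal(np.sort(ref_idx, axis=1),
                     np.sort(neigh_idx[0, :8], axis=1)))  # expected: True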