Example #1
    def forward(self, x, pos, batch):
        # FPS sampling
        id_clusters = fps(pos, ratio=self.ratio, batch=batch)

        # batch assignment of the sampled cluster centers
        sub_batch = batch[id_clusters] if batch is not None else None

        # for each cluster center, find its k nearest points
        # (beware: each center is its own nearest neighbor, a self-loop)
        id_k_neighbor = knn(pos,
                            pos[id_clusters],
                            k=self.k,
                            batch_x=batch,
                            batch_y=sub_batch)

        # transformation of features through a simple MLP
        x = self.mlp(x)

        # max-pool the k-NN features onto each cluster center
        x_out, _ = scatter_max(x[id_k_neighbor[1]],
                               id_k_neighbor[0],
                               dim_size=id_clusters.size(0),
                               dim=0)

        # keep only the clusters and their max-pooled features
        sub_pos, out = pos[id_clusters], x_out
        return out, sub_pos, sub_batch
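# For orientation, a self-contained toy run of the same FPS -> kNN ->
# scatter_max pipeline (a sketch with made-up shapes; the MLP step of
# the module above is skipped for brevity):
import torch
from torch_cluster import fps, knn
from torch_scatter import scatter_max

pos = torch.randn(100, 3)
x = torch.randn(100, 16)
batch = torch.zeros(100, dtype=torch.long)

idx = fps(pos, ratio=0.25, batch=batch)            # 25 cluster centers
row, col = knn(pos, pos[idx], k=8,
               batch_x=batch, batch_y=batch[idx])  # row: center, col: source point
x_out, _ = scatter_max(x[col], row, dim=0, dim_size=idx.size(0))
print(x_out.shape)  # torch.Size([25, 16])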
Example #2
def fps(x, batch=None, ratio=0.5, random_start=True):
    r"""A sampling algorithm from the `"PointNet++: Deep Hierarchical Feature
    Learning on Point Sets in a Metric Space"
    <https://arxiv.org/abs/1706.02413>`_ paper, which iteratively samples the
    most distant point with regard to the rest of the points.

    Args:
        x (Tensor): Node feature matrix
            :math:`\mathbf{X} \in \mathbb{R}^{N \times F}`.
        batch (LongTensor, optional): Batch vector
            :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns each
            node to a specific example. (default: :obj:`None`)
        ratio (float, optional): Sampling ratio. (default: :obj:`0.5`)
        random_start (bool, optional): If set to :obj:`False`, use the first
        node in :math:`\mathbf{X}` as starting node. (default: :obj:`True`)

    :rtype: :class:`LongTensor`

    .. code-block:: python

        import torch
        from torch_geometric.nn import fps

        x = torch.Tensor([[-1, -1], [-1, 1], [1, -1], [1, 1]])
        batch = torch.tensor([0, 0, 0, 0])
        index = fps(x, batch, ratio=0.5)
    """
    if torch_cluster is None:
        raise ImportError('`fps` requires `torch-cluster`.')

    return torch_cluster.fps(x, batch, ratio, random_start)
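# Building on the docstring example, a sketch of the batched case: the
# returned LongTensor indexes rows of x, and each example is sampled
# independently according to its own size:
import torch
from torch_geometric.nn import fps

x = torch.tensor([[-1., -1.], [-1., 1.], [1., -1.], [1., 1.],
                  [-2., -2.], [2., 2.]])
batch = torch.tensor([0, 0, 0, 0, 1, 1])
index = fps(x, batch, ratio=0.5, random_start=False)
# graph 0 (4 points) contributes 2 indices, graph 1 (2 points) contributes 1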
Example #3
def test_fps(dtype, device):
    # `tensor` is the test suite's helper that wraps torch.tensor with
    # the given dtype and device
    x = tensor([
        [-1, -1],
        [-1, +1],
        [+1, +1],
        [+1, -1],
        [-2, -2],
        [-2, +2],
        [+2, +2],
        [+2, -2],
    ], dtype, device)
    batch = tensor([0, 0, 0, 0, 1, 1, 1, 1], torch.long, device)

    out = fps(x, batch, ratio=0.5, random_start=False)
    # per graph: start at the first node, then take the farthest point
    assert out.tolist() == [0, 2, 4, 6]

    out = fps(x, ratio=0.5, random_start=False)
    # without a batch vector, all 8 points form one example
    assert out.sort()[0].tolist() == [0, 5, 6, 7]
Example #4
def test_random_fps(device):
    N = 1024
    for _ in range(5):
        pos = torch.randn((2 * N, 3), device=device)
        batch_1 = torch.zeros(N, dtype=torch.long, device=device)
        batch_2 = torch.ones(N, dtype=torch.long, device=device)
        batch = torch.cat([batch_1, batch_2])
        idx = fps(pos, batch, ratio=0.5)
        assert idx.min() >= 0 and idx.max() < 2 * N
Example #5
def test_fps_speed(dtype, device):
    return  # benchmark disabled by default; remove this line to run it
    batch_size, num_nodes = 100, 10000
    x = torch.randn((batch_size * num_nodes, 3), dtype=dtype, device=device)
    batch = torch.arange(batch_size, dtype=torch.long, device=device)
    batch = batch.view(-1, 1).repeat(1, num_nodes).view(-1)

    out = fps(x, batch, ratio=0.5, random_start=True)
    assert out.size(0) == batch_size * num_nodes * 0.5
    assert out.min().item() >= 0 and out.max().item() < batch_size * num_nodes

    batch_size, num_nodes, dim = 100, 300, 128
    x = torch.randn((batch_size * num_nodes, dim), dtype=dtype, device=device)
    batch = torch.arange(batch_size, dtype=torch.long, device=device)
    batch = batch.view(-1, 1).repeat(1, num_nodes).view(-1)
    out = fps(x, batch, ratio=0.5, random_start=True)
    assert out.size(0) == batch_size * num_nodes * 0.5
    assert out.min().item() >= 0 and out.max().item() < batch_size * num_nodes
Example #6
def getPatchDescriptors(mesh,
                        radius=15.0,
                        n_max=20,
                        l_max=10,
                        feature_name='zd',
                        sample_ratio=1.0):

    patches = getRadialGeodesicPatches(mesh,
                                       radius,
                                       add_self_loops=True,
                                       to_csr=True)

    M = MeshDescriptor(mesh,
                       patches,
                       n_max=n_max,
                       l_max=l_max,
                       scale_patches=True,
                       center_patches=True)

    if sample_ratio < 1.0:
        # use fps centroids as patch loci
        from torch_cluster import fps
        import torch

        idx = fps(torch.tensor(mesh.vertices),
                  batch=None,
                  ratio=sample_ratio,
                  random_start=False)
        idx = idx.numpy()
        descriptors = []
        for i in idx:
            descriptors.append(M.getPatchDescriptors(i))
        descriptors = np.array(descriptors)

        # map to all vertices
        descriptors = mapPointFeaturesToMesh(mesh,
                                             mesh.vertices[idx],
                                             descriptors,
                                             map_to='nearest')
    else:
        # loop over vertices, compute moments for every patch
        descriptors = []
        for i in range(M.Nv):
            descriptors.append(M.getPatchDescriptors(i))
        descriptors = np.array(descriptors)

    # add features to mesh
    feature_names = []
    for i in range(descriptors.shape[1]):
        key = "{}{}".format(feature_name, i)
        mesh.vertex_attributes[key] = descriptors[:, i]
        feature_names.append(key)

    return feature_names
Example #7
    def forward(self, batch):

        indices = fps(batch.pos,
                      batch.batch,
                      ratio=self.ratio,
                      random_start=False)

        new_batch = torch_geometric.data.Batch(batch=batch.batch[indices],
                                               x=batch.x[indices],
                                               pos=batch.pos[indices])

        return new_batch
Example #8
def furthest_point_sample(xyz: torch.Tensor, num_samples: int):
    # How to use fps: examples/PointNet2ASIS/tests/fps_test.py
    xyz = xyz.transpose(1, 2).contiguous()  # [B, C, N] -> [B, N, C]
    device = xyz.device
    B, N, C = xyz.shape
    xyz = xyz.view(B * N, C)  # pack all clouds into one point list
    batch = torch.arange(0, B, dtype=torch.long, device=device)
    batch = batch.view(-1, 1).repeat(1, N).view(B * N)
    fps_idx = fps(xyz, batch, num_samples / N, True)  # indices into packed points
    fps_idx = fps_idx.view(B, num_samples)
    idx_base = torch.arange(0, B, device=device).view(-1, 1) * N
    fps_idx = fps_idx - idx_base  # back to per-cloud indices in [0, N)
    return fps_idx
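# Usage sketch for furthest_point_sample() above (toy shapes): the
# helper expects xyz shaped [B, C, N] -- it transposes internally --
# and returns per-cloud indices in [0, N).
import torch

xyz = torch.randn(4, 3, 128)  # B=4 clouds, 3 coords, N=128 points
idx = furthest_point_sample(xyz, num_samples=32)
print(idx.shape)              # torch.Size([4, 32])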
Example #9
    def __call__(self, sample):
        keys = sorted([x for x in dir(sample) if 'edge_index' in x])

        num_vertices = [sample.num_nodes] + sample.num_vertices

        # sample.edge_index = knn_graph(sample.pos, self._k[0])

        # knn_edges = knn_graph(sample.pos, self._k[0] * self._d)
        # dilated_idx = [index for index in range(knn_edges.shape[1])[0::self._d]]

        # sample.edge_index = knn_edges[:, dilated_idx]

        for level, key in enumerate(keys):
            if level == len(keys) - 1:
                break

            pos_key = key.replace('edge_index',
                                  'pos').replace('hierarchy_', '')

            subset_points_idx = fps(sample[pos_key],
                                    ratio=sample.num_vertices[level] /
                                    num_vertices[level])

            # if level == 0:
            #     sample.y = sample.y[subset_points_idx]

            num_vertices[level + 1] = subset_points_idx.shape[0]
            sample.num_vertices[level] = num_vertices[level + 1]
            sample['pos_' +
                   str(level + 1)] = sample[pos_key][subset_points_idx]
            sample[f"hierarchy_trace_index_{level+1}"] = knn(
                sample['pos_' + str(level + 1)], sample[pos_key], 1)[1, :]
            # sample[f"hierarchy_edge_index_{level+1}"] = knn_graph(sample['pos_' + str(level+1)], self._k[level+1])

            # knn_edges = knn_graph(sample['pos_' + str(level+1)], self._k[level+1] * self._d)
            # dilated_idx = [index for index in range(knn_edges.shape[1])[0::self._d]]

            # sample[f"hierarchy_edge_index_{level+1}"] = knn_edges[:, dilated_idx]

        keys = sorted([x for x in dir(sample) if x.startswith('x_')])
        for key in keys:
            delattr(sample, key)

        # keys = sorted([x for x in dir(sample) if 'pos_' in x])
        # for key in keys:
        #     delattr(sample, key)

        return sample
Example #10
def fps_sampling(pair_ind, pos, num_pos_pairs, ind=0):
    """
    perform fps sampling to choose the positive pairs
    Parameters:
        pair_ind: torch tensor which represents index of pair size N x 2
        pos: torch tensor which represents the point cloud of size M x 3
        num_pos_pairs: int which number of pairs we want()
        ind: must be 0 or 1
    """
    small_pos_source = pos[pair_ind[:, ind]]
    batch = torch.zeros(small_pos_source.shape[0]).long()
    ratio = float(num_pos_pairs) / len(pair_ind)
    if (ratio <= 0 or ratio >= 1):
        raise ValueError("ratio cannot have this value: {}".format(ratio))
    index = fps(small_pos_source, batch, ratio=ratio, random_start=False)
    return index
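# Usage sketch for fps_sampling() above (toy data; shapes are made up):
import torch

pos = torch.randn(200, 3)                  # point cloud, M x 3
pair_ind = torch.randint(0, 200, (40, 2))  # candidate pairs, N x 2
keep = fps_sampling(pair_ind, pos, num_pos_pairs=10, ind=0)
pairs = pair_ind[keep]                     # roughly num_pos_pairs rows survive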
Example #11
def farthest_point_sample_fast(xyz, npoint):
    """
    Input:
        xyz: pointcloud data, [B, N, C]
        npoint: number of samples
    Return:
        centroids: sampled pointcloud index, [B, npoint]
    """
    device = xyz.device
    B, N, C = xyz.shape

    centroids = torch.zeros(B, npoint, dtype=torch.long).to(device)

    # nudge the ratio just below npoint / N so the sampler's ceil
    # rounding returns exactly npoint indices despite float error
    r = npoint / N * 0.9999999 + 0.00000001

    for i in range(B):
        centroids[i] = fps(xyz[i], ratio=r)

    return centroids
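# If torch-cluster's rounding takes the ceiling of ratio * N per
# example (as it appears to), the nudged ratio above can be checked
# numerically: an exact npoint / N may round up one too far, while the
# epsilon keeps the count at exactly npoint.
import math

N, npoint = 1000, 333
r = npoint / N * 0.9999999 + 0.00000001
assert math.ceil(r * N) == npoint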
Example #12
def create_patch_pair(depth_path, mask_path, im_cam, gt, save_name,
                      md_pcd_pts):
    raw_depth = io.load_depth(depth_path)
    mask = io.load_im(mask_path)

    img_pcd = PointCloud.create_from_depth_image(
        depth=Image(masked_where(mask == 0.0, raw_depth).filled(0.0)),
        intrinsic=PHCamIntrinsic(*IM_SIZE, *[im_cam['cam_K'][i] for i in K]),
        depth_scale=im_cam['depth_scale'],
        depth_trunc=150000)
    img_pcd = img_pcd.voxel_down_sample(VOXEL_SIZE)  # returns a new cloud

    if np.asarray(img_pcd.points).shape[0] in PCD_PTS_RANGE or IS_TARGET:
        cam_R, cam_t = gt['cam_R_m2c'], gt['cam_t_m2c']

        # Select reference points on image using farthest point sampling
        img_pcd_pts_fps = torch.as_tensor(img_pcd.points).to(DEVICE)
        img_ref_idxs = fps(img_pcd_pts_fps, ratio=FPS_RATIO).to('cpu').numpy()

        # Calculate model reference points
        img_ref_pts = np.asarray(img_pcd.points)[img_ref_idxs]
        md_ref_pts = (img_ref_pts - cam_t.T) @ np.linalg.inv(cam_R).T

        # Recreate model point cloud
        md_ref_idxs = np.arange(md_ref_pts.shape[0])
        md_pcd_pts = np.concatenate([md_ref_pts, md_pcd_pts], axis=0)
        md_pcd = PointCloud()
        md_pcd.points = Vector3dVector(md_pcd_pts)

        # Calculate and save PPFs
        img_save_path = f'image/{save_name}'
        create_local_patches(img_pcd, img_ref_idxs, img_save_path)

        md_save_path = f'model/{save_name}'
        create_local_patches(md_pcd, md_ref_idxs, md_save_path)

        entry = [save_name, img_ref_idxs.shape[0]]
    else:
        entry = []

    return entry
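# The model-reference-point line above inverts the rigid transform
# x_cam = R @ x_model + t for row-stacked points; a quick numeric
# check of that identity with random data:
import numpy as np

R = np.linalg.qr(np.random.randn(3, 3))[0]  # arbitrary orthonormal matrix
t = np.random.randn(3, 1)
x_model = np.random.randn(5, 3)
x_cam = x_model @ R.T + t.T
assert np.allclose((x_cam - t.T) @ np.linalg.inv(R).T, x_model)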
Example #13
    def getMutiplePrototypes(self, feat, k):
        """
        Extract multiple prototypes by points separation and assembly

        Args:
            feat: input point features, shape: (n_points, feat_dim)
            k: number of seeds to sample with FPS
        Return:
            prototypes: output prototypes, shape: (n_prototypes, feat_dim)
        """
        # sample k seeds as initial centers with Farthest Point Sampling (FPS)
        n = feat.shape[0]
        assert n > 0
        ratio = k / n
        if ratio < 1:
            fps_index = fps(feat, None, ratio=ratio,
                            random_start=False).unique()
            num_prototypes = len(fps_index)
            farthest_seeds = feat[fps_index]

            # compute the point-to-seed distance
            distances = F.pairwise_distance(
                feat[..., None],
                farthest_seeds.transpose(0, 1)[None, ...],
                p=2)  # (n_points, n_prototypes)

            # hard assignment for each point
            assignments = torch.argmin(distances, dim=1)  # (n_points,)

            # aggregating each cluster to form prototype
            prototypes = torch.zeros((num_prototypes, self.feat_dim)).cuda()
            for i in range(num_prototypes):
                selected = torch.nonzero(assignments == i).squeeze(1)
                selected = feat[selected, :]
                prototypes[i] = selected.mean(0)
            return prototypes
        else:
            return feat
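# The per-cluster mean loop above can be vectorized; a sketch with
# torch_scatter's scatter_mean (an assumption -- the source loops
# explicitly; note scatter_mean yields 0 for empty clusters where the
# loop would produce NaN):
import torch
from torch_scatter import scatter_mean

feat = torch.randn(50, 8)
assignments = torch.randint(0, 5, (50,))
prototypes = scatter_mean(feat, assignments, dim=0, dim_size=5)
print(prototypes.shape)  # torch.Size([5, 8])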
Example #14
    def down_sample(self, data):
        # sentinel ratio > 1 for empty input so the early-return fires
        ratio = (float(self.point_num_th) /
                 data.shape[0]) if data.shape[0] != 0 else 10086
        if ratio >= 1.0:
            return None, None
        # mid_x = data[:, 0].mean()
        # mid_y = data[:, 1].mean()
        # inds = [
        #     (data[:, 0] >= mid_x) & (data[:, 1] >= mid_y),
        #     (data[:, 0] >= mid_x) & (data[:, 1] <  mid_y),
        #     (data[:, 0] <  mid_x) & (data[:, 1] >= mid_y),
        #     (data[:, 0] <  mid_x) & (data[:, 1] <  mid_y),
        # ]
        # for i in inds:
        # cur_i = fps(data[i], torch.zeros(data[i].shape[0]).cuda().long(), ratio=ratio, random_start=False)
        # cur_index_torch = torch.zeros(data[i].shape[0]).cuda().long()
        # cur_index_torch[cur_i] = 1
        # index_torch[i] = cur_index_torch

        # step = data.shape[0] // 4
        # split = [i * step for i in range(5)]
        # split[-1] = data.shape[0]
        # index_torch = torch.zeros(data.shape[0]).cuda().long()
        # for i in range(4):
        #     cur_i = fps(data[split[i]:split[i+1]], torch.zeros(data[split[i]:split[i+1]].shape[0]).cuda().long(), ratio=ratio, random_start=False)
        #     index_torch[cur_i + i*step] = 1

        index_torch = fps(data,
                          torch.zeros(data.shape[0]).cuda().long(),
                          ratio=ratio,
                          random_start=False)
        # index_torch = index_torch == 1
        index = index_torch.detach().cpu().numpy()

        # index = np.random.choice(data.shape[0], int(ratio * data.shape[0]))
        # index_torch = torch.from_numpy(index).cuda()
        return index, index_torch
Example #15
def farthest_sampling(point_clouds: PointClouds3D,
                      ratio: float) -> PointClouds3D:
    """
    Args:
        point_clouds: Pointclouds object
    """
    points_packed = point_clouds.points_packed()

    from torch_cluster import fps
    packed_to_cloud_idx = num_points_2_packed_to_cloud_idx(
        point_clouds.num_points_per_cloud())
    fps_idx = fps(points_packed, packed_to_cloud_idx, ratio)
    sampled_points = points_packed[fps_idx]

    # back to pointclouds object
    point_lst = [
        sampled_points[packed_to_cloud_idx[fps_idx] == b]
        for b in range(len(point_clouds))
    ]
    sampled_point_clouds = point_clouds.__class__(point_lst)

    if (normals_packed := point_clouds.normals_packed()) is not None:
        normals_packed = normals_packed[fps_idx]
        sampled_point_clouds.update_normals_(normals_packed)

    return sampled_point_clouds
Example #16
            # select only corresponding points
            pcd1_corr = pcd1.select_down_sample(corrs[:, 0])
            pcd2_corr = pcd2.select_down_sample(corrs[:, 1])

            pcd1 = pcd1.voxel_down_sample(voxel_size)
            pcd2 = pcd2.voxel_down_sample(voxel_size)

            # apply ground truth transformation to bring them in the same reference frame
            pcd1_corr.transform(T1)
            pcd2_corr.transform(T2)

            # FPS
            tensor_pcd1_frag = torch.Tensor(np.asarray(pcd1_corr.points)).to(device)
            fps_pcd1_idx = fps(tensor_pcd1_frag,
                               ratio=batch_size / tensor_pcd1_frag.shape[0],
                               random_start=True)

            _pcd2_frag_tree = o3d.geometry.KDTreeFlann(pcd2_corr)

            fps_pcd1_pts = np.asarray(pcd1_corr.points)[fps_pcd1_idx.cpu()]

            fps_pcd2_idx = torch.empty(fps_pcd1_idx.shape, dtype=torch.long)

            # find nearest neighbors on the other point cloud
            for i, pt in enumerate(fps_pcd1_pts):
                _, patch_idx, _ = _pcd2_frag_tree.search_knn_vector_xd(pt, 1)
                fps_pcd2_idx[i] = patch_idx[0]

            # visualise point clouds with FPS + NN result overlaid
            # to_viz = []
Example #17
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--folder', '-f', help='Path to data folder')
    parser.add_argument('--benchmark', '-b', help='Path to benchmark folder')
    parser.add_argument('--outpath', '-o', help='Path to output folder')
    parser.add_argument('--saveply',
                        '-s',
                        action='store_true',
                        help='Save color ply or not')
    args = parser.parse_args()
    print(args)

    #label_tsv = args.benchmark + "/scannet-labels.combined.tsv"
    train_list_file = args.benchmark + "/train.txt"
    test_list_file = args.benchmark + "/test.txt"
    val_list_file = args.benchmark + "/val.txt"
    #label_shapenetcore55 = args.benchmark + "/classes_ObjClassification-ShapeNetCore55.txt"

    ##########################################################Read Source##########################################################

    print("read scene dir:", args.folder)
    scenedir = dir(args.folder, 'd')

    print("read trainval list:", train_list_file)
    train_scene_list = []
    with open(train_list_file, 'r') as train_f:
        for line in train_f.readlines():
            sceneid = line.strip().split("scene")[1]
            spaceid = sceneid.split("_")[0]
            scanid = sceneid.split("_")[1]
            train_scene_list.append(spaceid + scanid)

    print("read test list:", test_list_file)
    test_scene_list = []
    with open(test_list_file, 'r') as test_f:
        for line in test_f.readlines():
            sceneid = line.strip().split("scene")[1]
            spaceid = sceneid.split("_")[0]
            scanid = sceneid.split("_")[1]
            test_scene_list.append(spaceid + scanid)

    print("read val list:", val_list_file)
    val_scene_list = []
    with open(val_list_file, 'r') as val_f:
        for line in val_f.readlines():
            sceneid = line.strip().split("scene")[1]
            spaceid = sceneid.split("_")[0]
            scanid = sceneid.split("_")[1]
            val_scene_list.append(spaceid + scanid)

    # split scene to train and test
    process_train_list = []
    process_test_list = []

    for scene in scenedir:

        sceneid = scene.strip().split("scene")[1]
        spaceid = sceneid.split("_")[0]
        scanid = sceneid.split("_")[1]
        scenename = spaceid + scanid

        if scenename in train_scene_list:

            process_train_list.append(scene)

        elif scenename in test_scene_list:

            process_test_list.append(scene)

    print("Train all:", len(train_scene_list), "Test all:",
          len(test_scene_list), "Dir all:", len(scenedir))
    print("Process Train:", len(process_train_list), "Process Test:",
          len(process_test_list))
    ##########################################################Process Data##########################################################
    print("Process Train Scene:")

    for scene in process_train_list:
        sceneid = scene.strip().split("scene")[1]  # id of this scene, not the last loop's
        ply_file = scene + "/scene" + sceneid + "_vh_clean_2.ply"
        # Read ply file
        print("\nRead ply file:", ply_file)
        plydata = PlyData.read(ply_file).elements[0].data
        pts_num = len(plydata)
        print("points num:", pts_num)
        # structured ply records cannot go straight into torch.tensor;
        # stack the coordinate fields instead (x/y/z properties assumed)
        data = torch.tensor(np.stack(
            [plydata['x'], plydata['y'], plydata['z']], axis=1))
        batch = torch.zeros(pts_num, dtype=torch.long)
        index = fps(data, batch, ratio=0.5, random_start=False)
        data = data[index]
        # torch.save needs a destination path; the original omitted it,
        # so this target file name is an assumption
        torch.save(data, ply_file + ".fps.pt")

    print("Process Test Scene:")

    for scene in process_test_list:
        scene2instances(scene, args.outpath + "/test/",
                        [label_map, label_info], label_shapenetcore55_map,
                        args.saveply)
Example #18
def fps2(x: Tensor, ratio: Tensor) -> Tensor:
    # thin wrapper pinning fps's optional arguments (e.g. for TorchScript)
    return fps(x, None, ratio, False)
Example #19
    def all_getter(self, idx):
        save_id, cloud_ind_0, cloud_ind_1, common_voxel = self.all_valid_combs[idx]['combination']
        ground_height = self.save_dict[save_id]['ground_height']
        clouds = self.save_dict[save_id]['clouds']
        center = self.all_valid_combs[idx]['voxel_center']
        are_same = (cloud_ind_1 == cloud_ind_0)

        cloud_0, cloud_1 = clouds[cloud_ind_0], clouds[cloud_ind_1]

        voxel_1_small = get_voxel(cloud_1, center, self.final_voxel_size)
        voxel_0_large = get_voxel(cloud_0, center, self.context_voxel_size)

        # FPS-subsample each voxel, then truncate to a fixed point count
        voxel_1_small = voxel_1_small[fps(
            voxel_1_small, torch.zeros(voxel_1_small.shape[0]).long(),
            ratio=self.n_samples / voxel_1_small.shape[0],
            random_start=False), :]
        voxel_1_small = voxel_1_small[:self.n_samples, :]

        voxel_0_large = voxel_0_large[fps(
            voxel_0_large, torch.zeros(voxel_0_large.shape[0]).long(),
            ratio=self.n_samples_context / voxel_0_large.shape[0],
            random_start=False), :]
        voxel_0_large = voxel_0_large[:self.n_samples_context, :]

        if self.include_all:
            voxel_0_small = get_voxel(cloud_0, center, self.final_voxel_size)
            voxel_1_small_original = voxel_1_small.clone()

            voxel_0_small = voxel_0_small[fps(
                voxel_0_small, torch.zeros(voxel_0_small.shape[0]).long(),
                ratio=self.n_samples / voxel_0_small.shape[0],
                random_start=False), :]
            voxel_0_small = voxel_0_small[:self.n_samples, :]

            voxel_0_small_original = voxel_0_small.clone()
            voxel_0_small_self, voxel_0_large_self, inverse = self.last_processing(
                voxel_0_small, voxel_0_large)

            voxel_1_large = get_voxel(cloud_1, center, self.context_voxel_size)
            voxel_1_large = voxel_1_large[fps(
                voxel_1_large, torch.zeros(voxel_1_large.shape[0]).long(),
                ratio=self.n_samples / voxel_1_large.shape[0],
                random_start=False), :]
            voxel_1_large = voxel_1_large[:self.n_samples, :]
            voxel_1_large_self, voxel_1_small_self, inverse = self.last_processing(
                voxel_1_large, voxel_1_small)
            voxel_opposite_small, voxel_opposite_large, inverse = self.last_processing(
                voxel_0_small, voxel_1_large)

        # only augment in train
        if are_same:
            voxel_1_small = voxel_1_small.clone()
            if self.mode == 'train':
                voxel_0_large[:, :3] += torch.rand_like(voxel_0_large[:, :3]) * 0.01

        voxel_0_large, voxel_1_small, inverse = self.last_processing(
            voxel_0_large, voxel_1_small)

        if self.mode == 'train':
            rads = torch.rand(1) * math.pi * 2
            if self.rotation_augment:
                rot_mat = rotate_xy(rads)
                voxel_0_large[:, :2] = torch.matmul(voxel_0_large[:, :2], rot_mat)
                voxel_1_small[:, :2] = torch.matmul(voxel_1_small[:, :2], rot_mat)

        # distance from ground as extra context
        extra_context = inverse['mean'][2] - ground_height
        extra_context = extra_context.unsqueeze(-1)
        if self.include_all:
            return (voxel_0_large, voxel_1_small, extra_context,
                    voxel_1_large_self, voxel_1_small_self,
                    voxel_opposite_small, voxel_opposite_large,
                    voxel_0_small_self, voxel_0_large_self,
                    voxel_0_small_original, voxel_1_small_original)
        else:
            return voxel_0_large, voxel_1_small, extra_context