def sample_and_group(npoint, radius, nsample, xyz, points, tnet_spec=None, knn=False, use_xyz=True):
    '''
    Input:
        npoint: int32
        radius: float32
        nsample: int32
        xyz: (batch_size, ndataset, 3) TF tensor
        points: (batch_size, ndataset, channel) TF tensor, if None will just use xyz as points
        tnet_spec: dict (keys: mlp, mlp2, is_training, bn_decay), if None do not apply tnet
        knn: bool, if True use kNN instead of radius search
        use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
    Output:
        new_xyz: (batch_size, npoint, 3) TF tensor
        new_points: (batch_size, npoint, nsample, 3+channel) TF tensor
        idx: (batch_size, npoint, nsample) TF tensor, indices of local points as in ndataset points
        grouped_xyz: (batch_size, npoint, nsample, 3) TF tensor, normalized point XYZs
            (subtracted by seed point XYZ) in local regions
    '''

    new_xyz = gather_point(xyz, farthest_point_sample(npoint, xyz)) # (batch_size, npoint, 3)
    if knn:
        _,idx = knn_point(nsample, xyz, new_xyz)
    else:
        if np.isscalar(radius):
            idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)
        else:
            idx_list = []
            for radius_one, xyz_one, new_xyz_one in zip(tf.unstack(radius,axis=0), tf.unstack(xyz, axis=0),tf.unstack(new_xyz, axis=0)):
                idx_one, _ = query_ball_point(radius_one, nsample, tf.expand_dims(xyz_one, axis=0), tf.expand_dims(new_xyz_one, axis=0))
                idx_list.append(idx_one)
            idx = tf.stack(idx_list, axis=0)
            idx = tf.squeeze(idx, axis=1)

    grouped_xyz = group_point(xyz, idx) # (batch_size, npoint, nsample, 3)
    grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2), [1,1,nsample,1]) # translation normalization
    if tnet_spec is not None:
        grouped_xyz = tnet(grouped_xyz, tnet_spec)
    if points is not None:
        grouped_points = group_point(points, idx) # (batch_size, npoint, nsample, channel)
        if use_xyz:
            # new_points = tf.concat([grouped_xyz, tf.tile(tf.expand_dims(new_xyz, 2), [1,1,nsample,1]),grouped_points], axis=-1) # (batch_size, npoint, nsample, 3+channel)
            new_points = tf.concat([grouped_xyz, grouped_points],axis=-1)  # (batch_size, npoint, nsample, 3+channel)
        else:
            new_points = grouped_points
    else:
        # new_points =  tf.concat([grouped_xyz, tf.tile(tf.expand_dims(new_xyz, 2), [1,1,nsample,1])], axis=-1)
        new_points = grouped_xyz

    return new_xyz, new_points, idx, grouped_xyz
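
A minimal usage sketch of the function above (not part of the original snippet); it assumes the compiled tf_ops wrappers (farthest_point_sample, gather_point, query_ball_point, group_point) are importable and that TF1-style graph execution is used. Shapes and values are illustrative only.

# Hedged usage sketch; shapes are assumptions, not values from the source.
import numpy as np
import tensorflow as tf

xyz = tf.constant(np.random.rand(8, 1024, 3).astype('float32'))      # input coordinates
points = tf.constant(np.random.rand(8, 1024, 64).astype('float32'))  # per-point features
new_xyz, new_points, idx, grouped_xyz = sample_and_group(
    npoint=512, radius=0.2, nsample=32, xyz=xyz, points=points)
with tf.Session() as sess:
    print(sess.run(new_points).shape)  # expected (8, 512, 32, 67): 3 xyz + 64 feature channels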
Example #2
 def call(self, x):
     '''
     Input: List
         xyz : (batch, n_inputs, 3)
         features : (batch, n_inputs, channels)
     Output: List
         new_xyz: (batch_size, n_centroids, 3) TF tensor
         new_points: (batch_size, n_centroids, n_samples, 3+channel) TF tensor
         centroid_idx: (batch_size, n_centroids) TF tensor, indices of centroid
         grouped_xyz: (batch_size, n_centroids, n_samples, 3) TF tensor, normalized point XYZs
     '''
     xyz, features = x
     if self.random:
         centroid_idx = random_sample(self.n_centroid, xyz)
     else:
         centroid_idx = farthest_point_sample(self.n_centroid, xyz)
     new_xyz = gather_point(xyz, centroid_idx)  # (batch, n_centroid, 3)
     idx, _ = query_ball_point(self.radius, self.n_samples, xyz, new_xyz)
     grouped_xyz = group_point(
         xyz, idx)  # (batch_size, n_centroids, n_sample, 3)
     grouped_xyz -= tf.tile(
         tf.expand_dims(new_xyz, 2),
         [1, 1, self.n_samples, 1])  # translation normalization
     grouped_xyz /= self.radius  # normalize xyz w.r.t the radius
     if self.use_feature:  # can't use None type here
         grouped_points = group_point(
             features, idx)  # (batch_size, n_centroid, n_samples, channels)
         if self.use_xyz:
             new_points = tf.concat([grouped_xyz, grouped_points], axis=-1)
             # (batch_size, n_centroid, n_samples, channels + 3)
         else:
             new_points = grouped_points
     else:
         new_points = grouped_xyz
     return [new_xyz, new_points, centroid_idx, grouped_xyz]
Example #3
def grouping(feature, K, src_xyz, q_xyz, use_xyz=True, use_knn=True, radius=0.2):
    '''
    K: neighbor size
    src_xyz: original point xyz (batch_size, ndataset, 3)
    q_xyz: query point xyz (batch_size, npoint, 3)
    '''

    batch_size = src_xyz.get_shape()[0]
    npoint = q_xyz.get_shape()[1]

    if use_knn:
        point_indices = tf.py_func(knn_query, [K, src_xyz, q_xyz], tf.int32)
        batch_indices = tf.tile(tf.reshape(tf.range(batch_size), (-1, 1, 1, 1)), (1, npoint, K, 1))
        idx = tf.concat([batch_indices, tf.expand_dims(point_indices, axis=3)], axis=3)
        idx.set_shape([batch_size, npoint, K, 2])
        grouped_xyz = tf.gather_nd(src_xyz, idx)
    else:
        point_indices, _ = tf_grouping.query_ball_point(radius, K, src_xyz, q_xyz)
        grouped_xyz = tf_grouping.group_point(src_xyz, point_indices)
        idx = point_indices  # keep a single `idx` name so the feature gather and the return work in both branches

    if use_knn:
        grouped_feature = tf.gather_nd(feature, idx)  # idx holds (batch, point) index pairs here
    else:
        grouped_feature = tf_grouping.group_point(feature, idx)  # idx holds per-query neighbor indices here

    if use_xyz:
        grouped_feature = tf.concat([grouped_xyz, grouped_feature], axis=-1)

    return grouped_xyz, grouped_feature, idx
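
A hedged usage sketch for grouping() via the ball-query path, written against the repaired else branch above; it assumes the tf_grouping custom ops are built and importable. K, radius and shapes are illustrative assumptions.

import numpy as np
import tensorflow as tf

feature = tf.constant(np.random.rand(4, 2048, 32).astype('float32'))
src_xyz = tf.constant(np.random.rand(4, 2048, 3).astype('float32'))
q_xyz = tf.constant(np.random.rand(4, 256, 3).astype('float32'))
g_xyz, g_feat, g_idx = grouping(feature, K=16, src_xyz=src_xyz, q_xyz=q_xyz,
                                use_knn=False, radius=0.2)
# g_xyz: (4, 256, 16, 3); g_feat: (4, 256, 16, 35) since use_xyz defaults to True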
def sample_and_group(npoint, radius, nsample, xyz, points, knn=False, use_xyz=True):
    ''' New sample_and_group with Fully Delayed-Aggregation
    Input:
        npoint: int32
        radius: float32
        nsample: int32
        xyz: (batch_size, ndataset, 3) TF tensor
        points: (batch_size, ndataset, channel) TF tensor, if None will just use xyz as points
        knn: bool, if True use kNN instead of radius search
        use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
    Output:
        new_xyz: (batch_size, npoint, 3) TF tensor
        new_points: (batch_size, npoint, nsample, 3+channel) TF tensor
        idx: (batch_size, npoint, nsample) TF tensor, indices of local points as in ndataset points
        grouped_xyz: (batch_size, npoint, nsample, 3) TF tensor, normalized point XYZs
            (subtracted by seed point XYZ) in local regions
    '''
    point_cloud_shape = points.get_shape()
    batch_size = point_cloud_shape[0].value
    num_points = point_cloud_shape[1].value
    num_dims = point_cloud_shape[-1].value
    
    # get the index and coordinates of sampled points
    sampled_idx = tf.random_uniform(shape=(batch_size, npoint),maxval=npoint-1,dtype=tf.int32)
    new_xyz = gather_point(xyz, sampled_idx) # (batch_size, npoint, 3)
    
    if knn:
        _,idx = knn_point(nsample, xyz, new_xyz)
    else:
        idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)

    # grouping:
    idx_ = tf.range(batch_size) * num_points
    idx_ = tf.reshape(idx_, [batch_size, 1, 1])

    points = tf.reshape(points, [-1, num_dims])
    new_points = tf.gather(points, idx + idx_)
    
    # get the sampled points as centroids with xyz+feature for coord correction
    sampled_idx = tf.expand_dims(sampled_idx, -1)
    sampled_points = tf.gather(points, sampled_idx + idx_)

    # coord correction
    new_points -= sampled_points

    # get the new xyz set for sampled points and neighbors
    xyz_shape = xyz.get_shape()
    batch_size = xyz_shape[0].value
    num_points = xyz_shape[1].value
    num_dims = xyz_shape[-1].value

    idx_ = tf.range(batch_size) * num_points
    idx_ = tf.reshape(idx_, [batch_size, 1, 1])
    
    xyz_reshaped = tf.reshape(xyz, [-1, num_dims])
    grouped_xyz = tf.gather(xyz_reshaped, idx + idx_)
  
    grouped_xyz -= tf.expand_dims(new_xyz, 2) # translation normalization

    return new_xyz, new_points, idx, grouped_xyz
Example #5
 def test(self):
     knn = True
     np.random.seed(100)
     pts = np.random.random((32, 512, 64)).astype("float32")
     tmp1 = np.random.random((32, 512, 3)).astype("float32")
     tmp2 = np.random.random((32, 128, 3)).astype("float32")
     with tf.device("/gpu:0"):
         points = tf.constant(pts)
         xyz1 = tf.constant(tmp1)
         xyz2 = tf.constant(tmp2)
         radius = 0.1
         nsample = 64
         if knn:
             _, idx = knn_point(nsample, xyz1, xyz2)
             grouped_points = group_point(points, idx)
         else:
             idx, _ = query_ball_point(radius, nsample, xyz1, xyz2)
             grouped_points = group_point(points, idx)
             # grouped_points_grad = tf.ones_like(grouped_points)
             # points_grad = tf.gradients(grouped_points, points, grouped_points_grad)
     with tf.compat.v1.Session("") as sess:
         now = time.time()
         for _ in range(100):
             ret = sess.run(grouped_points)
         print(time.time() - now)
         print(ret.shape, ret.dtype)
         print(ret)
Example #6
def sample_and_group(sample_pt_num, radius, neighbor_size, input_xyz, input_features):
    '''
    Input:
        sample_pt_num: how many points to keep
        radius: query ball radius
        neighbor_size: how many neighbor points
        input_xyz: (batch_size, npoints, 3)
        input_features: (batch_size, npoint, C)
    Output:
        sampled_xyz: (batch_size, sample_pt_num, 3)
        idx: (batch_size, sample_pt_num, neighbor_size)
        sampled_grouped_relation: (batch_size, sample_pt_num, neighbor_size, 10)
        sampled_grouped_features: (batch_size, sample_pt_num, neighbor_size, C)

    '''

    sampled_xyz = gather_point(input_xyz, farthest_point_sample(sample_pt_num, input_xyz))  # (batch_size, sample_pt_num, 3)
    idx, pts_cnt = query_ball_point(radius, neighbor_size, input_xyz, sampled_xyz)
    sampled_grouped_xyz = group_point(input_xyz, idx)  # (batch_size, sample_pt_num, neighbor_size, 3)
    sampled_grouped_features = group_point(input_features, idx)

    sampled_center_xyz = tf.tile(tf.expand_dims(sampled_xyz, 2), [1, 1, neighbor_size, 1])  # (batch_size, npoint, nsample, 3)

    euclidean = tf.reduce_sum(tf.square(sampled_grouped_xyz-sampled_center_xyz), axis=-1, keepdims=True)  # (batch_size, npoint, nsample, 1)
    sampled_grouped_relation = tf.concat([euclidean, sampled_center_xyz-sampled_grouped_xyz,
                                  sampled_center_xyz, sampled_grouped_xyz], axis=-1)  # (batch_size, npoint, nsample, 10)

    return sampled_xyz, idx, sampled_grouped_relation, sampled_grouped_features
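
A hedged usage sketch for the relation-encoding variant above; the 10-D relation per neighbour is [squared distance, center-neighbour offset, center xyz, neighbour xyz]. It assumes the tf_ops wrappers are available; shapes are illustrative.

import numpy as np
import tensorflow as tf

pts = tf.constant(np.random.rand(2, 4096, 3).astype('float32'))
feats = tf.constant(np.random.rand(2, 4096, 16).astype('float32'))
s_xyz, s_idx, s_rel, s_feat = sample_and_group(
    sample_pt_num=1024, radius=0.1, neighbor_size=32,
    input_xyz=pts, input_features=feats)
# s_rel: (2, 1024, 32, 10), s_feat: (2, 1024, 32, 16)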
def sample_and_group(npoint, radius, nsample, xyz, points, knn=False, use_xyz=True):
    '''
    Input:
        npoint: int32
        radius: float32
        nsample: int32
        xyz: (batch_size, ndataset, 3) TF tensor
        points: (batch_size, ndataset, channel) TF tensor, if None will just use xyz as points
        knn: bool, if True use kNN instead of radius search
        use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
    Output:
        new_xyz: (batch_size, npoint, 3) TF tensor
        new_points: (batch_size, npoint, nsample, 3+channel) TF tensor
        idx: (batch_size, npoint, nsample) TF tensor, indices of local points as in ndataset points
        grouped_xyz: (batch_size, npoint, nsample, 3) TF tensor, normalized point XYZs
            (subtracted by seed point XYZ) in local regions
    '''

    new_xyz = gather_point(xyz, farthest_point_sample(npoint, xyz)) # (batch_size, npoint, 3)
    if knn:
        _,idx = knn_point(nsample, xyz, new_xyz)
    else:
        idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)
    grouped_xyz = group_point(xyz, idx) # (batch_size, npoint, nsample, 3)
    grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2), [1,1,nsample,1]) # translation normalization
    if points is not None:
        grouped_points = group_point(points, idx) # (batch_size, npoint, nsample, channel)
        if use_xyz:
            new_points = tf.concat([grouped_xyz, grouped_points], axis=-1) # (batch_size, npoint, nsample, 3+channel)
        else:
            new_points = grouped_points
    else:
        new_points = grouped_xyz

    return new_xyz, new_points, idx, grouped_xyz
Example #8
def get_repulsion_loss(pred,
                       nsample=20,
                       radius=0.07,
                       knn=False,
                       use_l1=False,
                       h=0.001):

    if knn:
        _, idx = knn_point_2(nsample, pred, pred)
        pts_cnt = tf.constant(nsample, shape=(30, 1024))
    else:
        idx, pts_cnt = query_ball_point(radius, nsample, pred, pred)
    tf.summary.histogram('smooth/unque_index', pts_cnt)

    grouped_pred = group_point(pred, idx)  # (batch_size, npoint, nsample, 3)
    grouped_pred -= tf.expand_dims(pred, 2)

    # get the uniform loss
    if use_l1:
        dists = tf.reduce_sum(tf.abs(grouped_pred), axis=-1)
    else:
        dists = tf.reduce_sum(grouped_pred**2, axis=-1)

    val, idx = tf.nn.top_k(-dists, 5)
    val = val[:, :, 1:]  # remove the first one

    if use_l1:
        h = np.sqrt(h) * 2
    print(("h is ", h))

    val = tf.maximum(0.0, h + val)  # dd/np.sqrt(n)
    repulsion_loss = tf.reduce_mean(val)
    return repulsion_loss
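
A hedged sketch of folding the repulsion term above into a total loss; knn=False is used so the hard-coded (30, 1024) pts_cnt shape of the kNN branch is not required. The weight and stand-in loss are assumptions for illustration.

import numpy as np
import tensorflow as tf

pred = tf.constant(np.random.rand(4, 1024, 3).astype('float32'))  # predicted point set
rep_loss = get_repulsion_loss(pred, nsample=20, radius=0.07, knn=False)
recon_loss = tf.constant(0.0)                # stand-in for the task loss
total_loss = recon_loss + 0.01 * rep_loss    # illustrative repulsion weight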
def kernel_density_estimation_ball(pts, radius, sigma, N_points = 128, is_norm = False):
    with tf.variable_scope("ComputeDensity") as sc:
        idx, pts_cnt = tf_grouping.query_ball_point(radius, N_points, pts, pts)
        g_pts = tf_grouping.group_point(pts, idx)
        g_pts -= tf.tile(tf.expand_dims(pts, 2), [1, 1, N_points, 1])

        R = tf.sqrt(sigma)
        xRinv = tf.div(g_pts, R)
        quadform = tf.reduce_sum(tf.square(xRinv), axis = -1)
        logsqrtdetSigma = tf.log(R) * 3
        mvnpdf = tf.exp(-0.5 * quadform - logsqrtdetSigma - 3 * tf.log(2 * 3.1415926) / 2)

        first_val, _ = tf.split(mvnpdf, [1, N_points - 1], axis = 2)

        mvnpdf = tf.reduce_sum(mvnpdf, axis = 2, keepdims = True)

        num_val_to_sub = tf.expand_dims(tf.cast(tf.subtract(N_points, pts_cnt), dtype = tf.float32), axis = -1)

        val_to_sub = tf.multiply(first_val, num_val_to_sub)

        mvnpdf = tf.subtract(mvnpdf, val_to_sub)

        scale = tf.div(1.0, tf.expand_dims(tf.cast(pts_cnt, dtype = tf.float32), axis = -1))
        density = tf.multiply(mvnpdf, scale)

        if is_norm:
            #grouped_xyz_sum = tf.reduce_sum(grouped_xyz, axis = 1, keepdims = True)
            density_max = tf.reduce_max(density, axis = 1, keepdims = True)
            density = tf.div(density, density_max)

        return density
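
A hedged usage sketch for the ball-based kernel density estimate above; it assumes the tf_grouping ops are built. With is_norm=True each cloud's densities are divided by their per-cloud maximum. Radius, sigma and shapes are illustrative.

import numpy as np
import tensorflow as tf

pts = tf.constant(np.random.rand(2, 2048, 3).astype('float32'))
density = kernel_density_estimation_ball(pts, radius=0.1, sigma=0.05,
                                         N_points=128, is_norm=True)
# density: (2, 2048, 1)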
Example #10
def get_uniform_loss(pcd,
                     percentages=[0.004, 0.006, 0.008, 0.010, 0.012],
                     radius=1.0):
    B, N, C = pcd.get_shape().as_list()
    npoint = int(N * 0.05)
    loss = []
    for p in percentages:
        nsample = int(N * p)
        r = math.sqrt(p * radius)
        disk_area = math.pi * (radius**2) * p / nsample
        #print(npoint,nsample)
        new_xyz = gather_point(pcd, farthest_point_sample(
            npoint, pcd))  # (batch_size, npoint, 3)
        idx, pts_cnt = query_ball_point(
            r, nsample, pcd, new_xyz)  #(batch_size, npoint, nsample)

        #expect_len =  tf.sqrt(2*disk_area/1.732)#using hexagon
        expect_len = tf.sqrt(disk_area)  # using square

        grouped_pcd = group_point(pcd, idx)
        grouped_pcd = tf.concat(tf.unstack(grouped_pcd, axis=1), axis=0)

        var, _ = knn_point(2, grouped_pcd, grouped_pcd)
        uniform_dis = -var[:, :, 1:]
        uniform_dis = tf.sqrt(tf.abs(uniform_dis + 1e-8))
        uniform_dis = tf.reduce_mean(uniform_dis, axis=[-1])
        uniform_dis = tf.square(uniform_dis - expect_len) / (expect_len + 1e-8)
        uniform_dis = tf.reshape(uniform_dis, [-1])

        mean, variance = tf.nn.moments(uniform_dis, axes=0)
        mean = mean * math.pow(p * 100, 2)
        #nothing 4
        loss.append(mean)
    return tf.add_n(loss) / len(percentages)
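
A hedged usage sketch for the uniformity term above; pcd needs a fully defined static shape because the function reads get_shape().as_list(). The percentages and radius here are illustrative assumptions.

import numpy as np
import tensorflow as tf

pcd = tf.constant(np.random.rand(4, 4096, 3).astype('float32'))
uni_loss = get_uniform_loss(pcd, percentages=[0.004, 0.008, 0.012], radius=1.0)
# uni_loss: scalar tensor, averaged over the given percentages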
    def radius_search_and_group(centroids_xyz, radius, num_neighbors, points_xyz, points_features):
        """ Perform radius search and grouping of points_xyz around each centroids_xyz

            Args:
            centroids_xyz: tf.tensor, xyz locations of centroids
            radius: float, radius of spherical region around centroid
            num_neighbors: int, number of neighbors to include in grouping per centroid
            points_xyz: tf.tensor, xyz locations of points
            points_features: tf.tensor, features of points

            Returns tf.tensor, grouped points and point features

        """
        
        # Radius search around each centroid, returning num_neighbors point indices within radius of centroid
        point_indices, _ = tf_grouping.query_ball_point(radius, num_neighbors, points_xyz, centroids_xyz)
        
        # Group neighboring points (and corresponding point features) together
        grouped_points_xyz = tf_grouping.group_point(points_xyz, point_indices) # (batch_size, num_centroids, num_neighbors, 3)
        grouped_points_features = tf_grouping.group_point(points_features, point_indices) # (batch_size, num_centroids, num_neighbors, num_features)
        
        # Normalize points' xyz locations in local region by subtracting the xyz of the centroid of that region
        grouped_points_xyz -= tf.tile(tf.expand_dims(centroids_xyz, 2), [1,1, num_neighbors ,1])
        grouped_points_xyz_and_features = tf.concat([grouped_points_xyz, grouped_points_features], axis=-1) # (batch_size, num_centroids, num_neighbors, 3+num_features)

        return grouped_points_xyz_and_features
Example #12
def get_perulsion_loss(pred,
                       nsample=15,
                       radius=0.07,
                       knn=False,
                       numpoint=512,
                       use_l1=False):
    # pred: (batch_size, npoint,3)
    if knn:
        with tf.device('/gpu:1'):
            _, idx = knn_point_2(nsample, pred, pred)
        pts_cnt = tf.constant(nsample, shape=(30, numpoint))
    else:
        idx, pts_cnt = query_ball_point(radius, nsample, pred, pred)
    tf.summary.histogram('smooth/unque_index', pts_cnt)

    grouped_pred = group_point(pred, idx)  # (batch_size, npoint, nsample, 3)
    grouped_pred -= tf.expand_dims(pred, 2)

    ##get the uniform loss
    dists = tf.reduce_sum(grouped_pred**2, axis=-1)
    if use_l1:
        dists = tf.sqrt(dists + 1e-12)
    val, idx = tf.nn.top_k(-dists, 5)
    val = val[:, :, 1:]  # remove the first one

    if use_l1:
        h = np.sqrt(0.001) * 2
    else:
        h = 0.01
    print("h is ", h)
    val = tf.maximum(0.0, h + val)  # dd/np.sqrt(n)
    perulsion_loss = tf.reduce_mean(val)
    return perulsion_loss
def sample_and_group(npoint, radius, nsample, xyz, points, knn=False, use_xyz=True):
    '''
    Input:
        npoint: int32
        radius: float32
        nsample: int32
        xyz: (batch_size, ndataset, 3) TF tensor
        points: (batch_size, ndataset, channel) TF tensor, if None will just use xyz as points
        knn: bool, if True use kNN instead of radius search
        use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
    Output:
        new_xyz: (batch_size, npoint, 3) TF tensor
        new_points: (batch_size, npoint, nsample, 3+channel) TF tensor
        idx: (batch_size, npoint, nsample) TF tensor, indices of local points as in ndataset points
        grouped_xyz: (batch_size, npoint, nsample, 3) TF tensor, normalized point XYZs
            (subtracted by seed point XYZ) in local regions
    '''

    new_xyz = gather_point(xyz, farthest_point_sample(npoint, xyz)) # (batch_size, npoint, 3)
    if knn:
        _,idx = knn_point(nsample, xyz, new_xyz)
    else:
        idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)
    grouped_xyz = group_point(xyz, idx) # (batch_size, npoint, nsample, 3)
    grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2), [1,1,nsample,1]) # translation normalization
    if points is not None:
        grouped_points = group_point(points, idx) # (batch_size, npoint, nsample, channel)
        if use_xyz:
            new_points = tf.concat([grouped_xyz, grouped_points], axis=-1) # (batch_size, npoint, nsample, 3+channel)
        else:
            new_points = grouped_points
    else:
        new_points = grouped_xyz

    return new_xyz, new_points, idx, grouped_xyz
def set_upconv_module(xyz1, xyz2, feat1, feat2, nsample, mlp, mlp2, is_training,
                      scope, bn_decay=None, bn=True, pooling='max', radius=None,
                      knn=True):
    """
        Feature propagation from xyz2 (less points) to xyz1 (more points)

    Inputs:
        xyz1: (batch_size, npoint1, 3)
        xyz2: (batch_size, npoint2, 3)
        feat1: (batch_size, npoint1, channel1) features for xyz1 points (earlier layers)
        feat2: (batch_size, npoint2, channel2) features for xyz2 points
    Output:
        feat1_new: (batch_size, npoint1, mlp[-1] or mlp2[-1] or channel1+3)

        TODO: Add support for skip links. Study how delta(XYZ) plays a role in feature updating.
    """
    with tf.variable_scope(scope) as sc:
        if knn:
            l2_dist, idx = knn_point(nsample, xyz2, xyz1)
        else:
            idx, pts_cnt = query_ball_point(radius, nsample, xyz2, xyz1)

        xyz2_grouped = group_point(xyz2, idx)  # batch_size, npoint1, nsample, 3
        xyz1_expanded = tf.expand_dims(xyz1, 2)  # batch_size, npoint1, 1, 3
        xyz_diff = xyz2_grouped - xyz1_expanded  # batch_size, npoint1, nsample, 3

        feat2_grouped = group_point(feat2,
                                    idx)  # batch_size, npoint1, nsample, channel2
        net = tf.concat([feat2_grouped, xyz_diff],
                        axis=3)  # batch_size, npoint1, nsample, channel2+3

        if mlp is None: mlp = []
        for i, num_out_channel in enumerate(mlp):
            net = tf_util.conv2d(net, num_out_channel, [1, 1],
                                 padding='VALID', stride=[1, 1],
                                 bn=True, is_training=is_training,
                                 scope='conv%d' % (i), bn_decay=bn_decay)
        if pooling == 'max':
            feat1_new = tf.reduce_max(net, axis=[2], keep_dims=False,
                                      name='maxpool')  # batch_size, npoint1, mlp[-1]
        elif pooling == 'avg':
            feat1_new = tf.reduce_mean(net, axis=[2], keep_dims=False,
                                       name='avgpool')  # batch_size, npoint1, mlp[-1]

        if feat1 is not None:
            feat1_new = tf.concat([feat1_new, feat1],
                                  axis=2)  # batch_size, npoint1, mlp[-1]+channel1

        feat1_new = tf.expand_dims(feat1_new,
                                   2)  # batch_size, npoint1, 1, mlp[-1]+channel2
        if mlp2 is None: mlp2 = []
        for i, num_out_channel in enumerate(mlp2):
            feat1_new = tf_util.conv2d(feat1_new, num_out_channel, [1, 1],
                                       padding='VALID', stride=[1, 1],
                                       bn=True, is_training=is_training,
                                       scope='post-conv%d' % (i),
                                       bn_decay=bn_decay)
        feat1_new = tf.squeeze(feat1_new, [2])  # batch_size, npoint1, mlp2[-1]
        return feat1_new
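
A hedged usage sketch for the set upconv module above, propagating features from the coarse level (xyz2) to the dense level (xyz1); it assumes tf_util.conv2d and the grouping ops are available. Shapes and MLP widths are illustrative.

import numpy as np
import tensorflow as tf

xyz1 = tf.constant(np.random.rand(2, 1024, 3).astype('float32'))   # dense level
xyz2 = tf.constant(np.random.rand(2, 256, 3).astype('float32'))    # coarse level
feat1 = tf.constant(np.random.rand(2, 1024, 64).astype('float32'))
feat2 = tf.constant(np.random.rand(2, 256, 128).astype('float32'))
is_training = tf.constant(True)

feat1_new = set_upconv_module(xyz1, xyz2, feat1, feat2, nsample=8,
                              mlp=[128, 128], mlp2=[128],
                              is_training=is_training, scope='up_sa_layer1',
                              knn=True)
# feat1_new: (2, 1024, 128)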
Example #15
def flow_embedding_module(xyz1, xyz2, feat1, feat2, radius, nsample, mlp, is_training, bn_decay, scope, bn=True, pooling='max', knn=True, corr_func='elementwise_product'):
    """
    Input:
        xyz1: (batch_size, npoint, 3)
        xyz2: (batch_size, npoint, 3)
        feat1: (batch_size, npoint, channel)
        feat2: (batch_size, npoint, channel)
    Output:
        xyz1: (batch_size, npoint, 3)
        feat1_new: (batch_size, npoint, mlp[-1])
    """
    if knn:
        _, idx = knn_point(nsample, xyz2, xyz1)
    else:
        idx, cnt = query_ball_point(radius, nsample, xyz2, xyz1)
        _, idx_knn = knn_point(nsample, xyz2, xyz1)
        cnt = tf.tile(tf.expand_dims(cnt, -1), [1,1,nsample])
        idx = tf.where(cnt > (nsample-1), idx, idx_knn)

    xyz2_grouped = group_point(xyz2, idx) # batch_size, npoint, nsample, 3
    xyz1_expanded = tf.expand_dims(xyz1, 2) # batch_size, npoint, 1, 3
    xyz_diff = xyz2_grouped - xyz1_expanded # batch_size, npoint, nsample, 3

    feat2_grouped = group_point(feat2, idx) # batch_size, npoint, nsample, channel
    feat1_expanded = tf.expand_dims(feat1, 2) # batch_size, npoint, 1, channel
    # TODO: change distance function
    if corr_func == 'elementwise_product':
        feat_diff = feat2_grouped * feat1_expanded # batch_size, npoint, nsample, channel
    elif corr_func == 'concat':
        feat_diff = tf.concat(axis=-1, values=[feat2_grouped, tf.tile(feat1_expanded,[1,1,nsample,1])]) # batch_size, npoint, sample, channel*2
    elif corr_func == 'dot_product':
        feat_diff = tf.reduce_sum(feat2_grouped * feat1_expanded, axis=[-1], keep_dims=True) # batch_size, npoint, nsample, 1
    elif corr_func == 'cosine_dist':
        feat2_grouped = tf.nn.l2_normalize(feat2_grouped, -1)
        feat1_expanded = tf.nn.l2_normalize(feat1_expanded, -1)
        feat_diff = tf.reduce_sum(feat2_grouped * feat1_expanded, axis=[-1], keep_dims=True) # batch_size, npoint, nsample, 1
    elif corr_func == 'flownet_like': # assuming square patch size k = 0 as the FlowNet paper
        batch_size = xyz1.get_shape()[0].value
        npoint = xyz1.get_shape()[1].value
        feat_diff = tf.reduce_sum(feat2_grouped * feat1_expanded, axis=[-1], keep_dims=True) # batch_size, npoint, nsample, 1
        total_diff = tf.concat(axis=-1, values=[xyz_diff, feat_diff]) # batch_size, npoint, nsample, 4
        feat1_new = tf.reshape(total_diff, [batch_size, npoint, -1]) # batch_size, npoint, nsample*4
        #feat1_new = tf.concat(axis=[-1], values=[feat1_new, feat1]) # batch_size, npoint, nsample*4+channel
        return xyz1, feat1_new


    feat1_new = tf.concat([feat_diff, xyz_diff], axis=3) # batch_size, npoint, nsample, [channel or 1] + 3
    # TODO: move scope to outer indent
    with tf.variable_scope(scope) as sc:
        for i, num_out_channel in enumerate(mlp):
            feat1_new = tf_util.conv2d(feat1_new, num_out_channel, [1,1],
                                       padding='VALID', stride=[1,1],
                                       bn=True, is_training=is_training,
                                       scope='conv_diff_%d'%(i), bn_decay=bn_decay)
    if pooling=='max':
        feat1_new = tf.reduce_max(feat1_new, axis=[2], keep_dims=False, name='maxpool_diff')
    elif pooling=='avg':
        feat1_new = tf.reduce_mean(feat1_new, axis=[2], keep_dims=False, name='avgpool_diff')
    return xyz1, feat1_new
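
A hedged usage sketch for the flow embedding module above with corr_func='concat'; it assumes tf_util.conv2d and the grouping/knn ops are available and that both clouds have the same number of points. Shapes are illustrative.

import numpy as np
import tensorflow as tf

xyz1 = tf.constant(np.random.rand(2, 512, 3).astype('float32'))
xyz2 = tf.constant(np.random.rand(2, 512, 3).astype('float32'))
feat1 = tf.constant(np.random.rand(2, 512, 64).astype('float32'))
feat2 = tf.constant(np.random.rand(2, 512, 64).astype('float32'))
is_training = tf.constant(False)

_, feat1_new = flow_embedding_module(xyz1, xyz2, feat1, feat2,
                                     radius=1.0, nsample=16, mlp=[128, 128],
                                     is_training=is_training, bn_decay=None,
                                     scope='flow_embedding', knn=True,
                                     corr_func='concat')
# feat1_new: (2, 512, 128) after max pooling over the nsample neighbours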
def sample_and_group_layer1(npoint,
                            radius,
                            nsample,
                            xyz,
                            points,
                            knn=False,
                            use_xyz=True):
    '''
    Input:
        npoint: int32
        radius: float32
        nsample: int32
        xyz: (batch_size, ndataset, 3) TF tensor
        points: (batch_size, ndataset, channel) TF tensor, if None will just use xyz as points
        knn: bool, if True use kNN instead of radius search
        use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
    Output:
        new_xyz: (batch_size, npoint, 3) TF tensor
        new_points: (batch_size, npoint, nsample, 3+channel) TF tensor
        idx: (batch_size, npoint, nsample) TF tensor, indices of local points as in ndataset points
        grouped_xyz: (batch_size, npoint, nsample, 3) TF tensor, normalized point XYZs
            (subtracted by seed point XYZ) in local regions
    '''
    #tf_ops/samples/tf_sampling.py
    new_xyz = gather_point(xyz, farthest_point_sample(
        npoint, xyz))  # (batch_size, npoint, 3), select the 512 sampled points
    if knn:
        _, idx = knn_point(nsample, xyz, new_xyz)
    else:
        idx, pts_cnt = query_ball_point(radius, nsample, xyz,
                                        new_xyz)  # for each of the 512 sampled points, the indices of its 32 neighbours
    grouped_xyz = group_point(
        xyz, idx)  # (batch_size, npoint, nsample, 3), gather the 32 neighbour coordinates of each of the 512 points
    grouped_xyz -= tf.tile(
        tf.expand_dims(new_xyz, 2), [1, 1, nsample, 1]
    )  # translation normalization: expand (bs,512,3)->(bs,512,1,3), tile the third dim 32x ->(bs,512,32,3), assuming npoint=512 and nsample=32
    kernel = tf.Variable(tf.random_normal([32, 16, 3], stddev=0.1, seed=1),
                         name='kernel')
    tf.add_to_collection("kernel", kernel)
    # kernel = tf.convert_to_tensor(kernel)
    kc_points = kernel_correlation(
        grouped_xyz, kernel,
        0.005)  # KC module ==>(b,l,n)===>(BS, npoint, 1, l)
    kc_points = tf.transpose(kc_points, perm=[0, 2, 1])
    kc_points = tf.tile(tf.expand_dims(kc_points, 2), [1, 1, nsample, 1])
    if points is not None:
        grouped_points = group_point(
            points, idx)  # (batch_size, npoint, nsample, channel)
        if use_xyz:  # whether to keep the raw XYZ spatial information
            new_points = tf.concat(
                [grouped_xyz, grouped_points],
                axis=-1)  # (batch_size, npoint, nsample, 3+channel)
        else:
            new_points = grouped_points
    else:
        new_points = grouped_xyz
    new_points = tf.concat([kc_points, new_points], axis=-1)

    return new_xyz, new_points, idx, grouped_xyz
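
A hedged usage sketch for the kernel-correlation variant above; it assumes the kernel_correlation custom op and the tf_ops wrappers are available. The (32, 16, 3) kernel variable is created inside the function, so the extra kc channels in new_points depend on that op's output size.

import numpy as np
import tensorflow as tf

xyz = tf.constant(np.random.rand(2, 1024, 3).astype('float32'))
points = tf.constant(np.random.rand(2, 1024, 16).astype('float32'))
new_xyz, new_points, idx, grouped_xyz = sample_and_group_layer1(
    npoint=512, radius=0.2, nsample=32, xyz=xyz, points=points)
# new_points: (2, 512, 32, kc_channels + 3 + 16)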
def sample_and_group(npoint,
                     radius,
                     nsample,
                     xyz,
                     points,
                     tnet_spec=None,
                     knn=False,
                     use_xyz=True,
                     centralize_points=False):
    '''
    Input:
        npoint: int32
        radius: float32
        nsample: int32
        xyz: (batch_size, ndataset, 3) TF tensor
        points: (batch_size, ndataset, channel) TF tensor, if None will just use xyz as points
        tnet_spec: dict (keys: mlp, mlp2, is_training, bn_decay), if None do not apply tnet
        knn: bool, if True use kNN instead of radius search
        use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
    Output:
        new_xyz: (batch_size, npoint, 3) TF tensor
        new_points: (batch_size, npoint, nsample, 3+channel) TF tensor
        idx: (batch_size, npoint, nsample) TF tensor, indices of local points as in ndataset points
        grouped_xyz: (batch_size, npoint, nsample, 3) TF tensor, normalized point XYZs
            (subtracted by seed point XYZ) in local regions
    '''

    fpsidx = farthest_point_sample(npoint, xyz)
    new_xyz = gather_point(xyz, fpsidx)  # (batch_size, npoint, 3)
    if knn:
        _, idx = knn_point(nsample, xyz, new_xyz)
    else:
        idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)
    grouped_xyz = group_point(xyz, idx)  # (batch_size, npoint, nsample, 3)
    grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2),
                           [1, 1, nsample, 1])  # translation normalization
    if tnet_spec is not None:
        grouped_xyz = tnet(grouped_xyz, tnet_spec)
    if points is not None:
        grouped_points = group_point(
            points, idx)  # (batch_size, npoint, nsample, channel)

        if centralize_points:
            central_points = gather_point(points[:, :, :3], fpsidx)
            grouped_points = tf.concat((grouped_points[:, :, :, :3] - tf.tile(
                tf.expand_dims(central_points, 2), [1, 1, nsample, 1]),
                                        grouped_points[:, :, :, 3:]), -1)

        if use_xyz:
            new_points = tf.concat(
                [grouped_xyz, grouped_points],
                axis=-1)  # (batch_size, npoint, nsample, 3+channel)
        else:
            new_points = grouped_points
    else:
        new_points = grouped_xyz

    return new_xyz, new_points, idx, grouped_xyz
def pointnet_sa_module_msg(xyz, points, npoint, radius_list, nsample_list, mlp_list, \
                is_training, bn_decay, scope, bn=True, use_xyz=True, use_nchw=False):
    ''' 
    new pointnet set abstraction (sa) module with multi-scale grouping (msg)
    '''
    data_format = 'NCHW' if use_nchw else 'NHWC'
    with tf.variable_scope(scope) as sc:
        input_points = xyz
        point_cloud_shape = points.get_shape()
        batch_size = point_cloud_shape[0].value
        # sampled_idx = tf.random_uniform(shape=(batch_size,npoint),maxval=npoint-1,dtype=tf.int32) 
        sampled_idx = farthest_point_sample(npoint, xyz)
        new_xyz = gather_point(xyz, sampled_idx)

        sampled_idx = tf.expand_dims(sampled_idx, -1)
        new_points_list = []
        for i in range(len(radius_list)):
            input_points = xyz
            if points is not None:
                if use_xyz:
                    input_points = tf.concat([input_points, points], axis=-1)
                else:
                    input_points = points
            else:
                input_points = xyz

            # fit for mlp
            input_points = tf.expand_dims(input_points, -2)
            print("[MSG-MLP]",input_points.shape, input_points.dtype)
            if use_nchw: input_points = tf.transpose(input_points, [0,3,1,2])
            for j,num_out_channel in enumerate(mlp_list[i]):
                input_points = tf_util.conv2d(input_points, num_out_channel, [1,1],
                                                padding='VALID', stride=[1,1], bn=bn, 
                                                is_training=is_training,
                                                scope='conv%d_%d'%(i,j), bn_decay=bn_decay)
            if use_nchw: input_points = tf.transpose(input_points, [0,2,3,1])

            radius = radius_list[i]
            nsample = nsample_list[i]
            idx, _ = query_ball_point(radius, nsample, xyz, new_xyz)
            
            # recover for grouping
            input_points = tf.squeeze(input_points, -2)
            sampled_points = new_group_point(input_points, sampled_idx)
            new_points = new_group_point(input_points, idx)

            # sampled_points = tf.squeeze(sampled_points, -2)
            # new_points -= sampled_points
            new_points = tf.reduce_max(new_points, axis=[2])
            new_points -= tf.squeeze(sampled_points, -2)
            # print(tf.shape(input_points), tf.shape(new_points))
            # sampled_points = gather_point(input_points, sampled_idx)
            new_points_list.append(new_points)

        new_points_concat = tf.concat(new_points_list, axis=-1)
        print("[MSG-MLP] output:",new_points_concat.shape)
        return new_xyz, new_points_concat
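
A hedged usage sketch for the delayed-aggregation MSG module above; it assumes tf_util.conv2d, new_group_point and the sampling ops are available. Radii, neighbour counts and MLP widths are illustrative.

import numpy as np
import tensorflow as tf

xyz = tf.constant(np.random.rand(2, 1024, 3).astype('float32'))
points = tf.constant(np.random.rand(2, 1024, 32).astype('float32'))
is_training = tf.constant(True)

new_xyz, new_points = pointnet_sa_module_msg(
    xyz, points, npoint=512, radius_list=[0.1, 0.2], nsample_list=[16, 32],
    mlp_list=[[32, 32, 64], [64, 64, 128]], is_training=is_training,
    bn_decay=None, scope='sa_msg_layer1')
# new_points: (2, 512, 64 + 128)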
Example #19
def pointnet_sa_module_msg(xyz,
                           points,
                           npoint,
                           radius_list,
                           nsample_list,
                           mlp_list,
                           scope,
                           use_xyz=True,
                           use_nchw=False):
    ''' PointNet Set Abstraction (SA) module with Multi-Scale Grouping (MSG)
        Input:
            xyz: (batch_size, ndataset, 3) TF tensor
            points: (batch_size, ndataset, channel) TF tensor
            npoint: int32 -- #points sampled in farthest point sampling
            radius: list of float32 -- search radius in local region
            nsample: list of int32 -- how many points in each local region
            mlp: list of list of int32 -- output size for MLP on each point
            use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
            use_nchw: bool, if True, use NCHW data format for conv2d, which is usually faster than NHWC format
        Return:
            new_xyz: (batch_size, npoint, 3) TF tensor
            new_points: (batch_size, npoint, \sum_k{mlp[k][-1]}) TF tensor
    '''
    with tf.variable_scope(scope) as sc:
        p1_idx = farthest_point_sample(npoint, xyz)
        new_xyz = gather_point(xyz, p1_idx)
        new_points_list = []
        for i in range(len(radius_list)):
            radius = radius_list[i]
            nsample = nsample_list[i]
            idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)
            grouped_xyz = group_point(xyz, idx)  #b*n*k*3
            grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2),
                                   [1, 1, nsample, 1])
            if points is not None:
                grouped_points = group_point(points, idx)
                if use_xyz:
                    grouped_points = tf.concat([grouped_points, grouped_xyz],
                                               axis=-1)
            else:
                grouped_points = grouped_xyz
            if use_nchw:
                grouped_points = tf.transpose(grouped_points, [0, 3, 1, 2])
            for j, num_out_channel in enumerate(mlp_list[i]):
                grouped_points = conv2d(grouped_points,
                                        num_out_channel, [1, 1],
                                        weight_decay=0,
                                        padding='VALID',
                                        stride=[1, 1],
                                        scope='conv%d_%d' % (i, j),
                                        activation_fn=tf.nn.leaky_relu)
            if use_nchw:
                grouped_points = tf.transpose(grouped_points, [0, 2, 3, 1])
            new_points = tf.reduce_max(grouped_points, axis=[2])  #b*n*c
            new_points_list.append(new_points)
        new_points_concat = tf.concat(new_points_list, axis=-1)
        return new_xyz, new_points_concat
Example #20
def sample_and_group(npoint,
                     radius,
                     nsample,
                     xyz,
                     points,
                     tnet_spec=None,
                     knn=False,
                     use_xyz=True):
    '''
    Input:
        npoint: int32
        radius: float32
        nsample: int32
        xyz: (batch_size, ndataset, 3) TF tensor
        points: (batch_size, ndataset, channel) TF tensor, if None will just use xyz as points
        tnet_spec: dict (keys: mlp, mlp2, is_training, bn_decay), if None do not apply tnet
        knn: bool, if True use kNN instead of radius search
        use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
    Output:
        new_xyz: (batch_size, npoint, 3) TF tensor
        new_points: (batch_size, npoint, nsample, 3+channel) TF tensor
        idx: (batch_size, npoint, nsample) TF tensor, indices of local points as in ndataset points
        grouped_xyz: (batch_size, npoint, nsample, 3) TF tensor, normalized point XYZs
            (subtracted by seed point XYZ) in local regions
    Workflow:
        Find the <npoint> down-sampled farthest points by <farthest_point_sample>
        For each down-sampled point, find <nsample> sub-group points by <query_ball_point>
    '''

    new_xyz = gather_point(
        xyz, farthest_point_sample(npoint, xyz)
    )  # (batch_size, npoint, 3)  the points sampled with farthest distance
    if knn:
        _, idx = knn_point(nsample, xyz, new_xyz)
    else:
        # [batch_size,npoint,nsample] [batch_size,npoint]
        idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)
    grouped_xyz = group_point(xyz, idx)  # (batch_size, npoint, nsample, 3)
    grouped_xyz -= tf.tile(tf.expand_dims(
        new_xyz, 2), [1, 1, nsample, 1
                      ])  # translation normalization: minus the center point
    if tnet_spec is not None:
        grouped_xyz = tnet(grouped_xyz, tnet_spec)
    if points is not None:
        grouped_points = group_point(
            points, idx)  # (batch_size, npoint, nsample, channel)
        if use_xyz:
            new_points = tf.concat(
                [grouped_xyz, grouped_points],
                axis=-1)  # (batch_size, npoint, nsample, 3+channel)
        else:
            new_points = grouped_points
    else:
        new_points = grouped_xyz

    return new_xyz, new_points, idx, grouped_xyz
Example #21
def pointnet_sa_module_msg(xyz,
                           points,
                           npoint,
                           radius_list,
                           nsample_list,
                           mlp_list,
                           is_training,
                           bn_decay,
                           scope,
                           bn=True,
                           use_xyz=True):
    ''' PointNet Set Abstraction (SA) module with Multi-Scale Grouping (MSG)
        Input:
            xyz: (batch_size, ndataset, 3) TF tensor
            points: (batch_size, ndataset, channel) TF tensor
            npoint: int32 -- #points sampled in farthest point sampling
            radius: list of float32 -- search radius in local region
            nsample: list of int32 -- how many points in each local region
            mlp: list of list of int32 -- output size for MLP on each point
            use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
        Return:
            new_xyz: (batch_size, npoint, 3) TF tensor
            new_points: (batch_size, npoint, \sum_k{mlp[k][-1]}) TF tensor
    '''
    with tf.variable_scope(scope) as sc:
        new_xyz = gather_point(xyz, farthest_point_sample(npoint, xyz))
        new_points_list = []
        for i in range(len(radius_list)):
            radius = radius_list[i]
            nsample = nsample_list[i]
            idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)
            grouped_xyz = group_point(xyz, idx)
            grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2),
                                   [1, 1, nsample, 1])
            if points is not None:
                grouped_points = group_point(points, idx)
                if use_xyz:
                    grouped_points = tf.concat([grouped_points, grouped_xyz],
                                               axis=-1)
            else:
                grouped_points = grouped_xyz
            for j, num_out_channel in enumerate(mlp_list[i]):
                grouped_points = tf_util.conv2d(grouped_points,
                                                num_out_channel, [1, 1],
                                                padding='VALID',
                                                stride=[1, 1],
                                                bn=bn,
                                                is_training=is_training,
                                                scope='conv%d_%d' % (i, j),
                                                bn_decay=bn_decay)
            new_points = tf.reduce_max(grouped_points, axis=[2])
            new_points_list.append(new_points)
        new_points_concat = tf.concat(new_points_list, axis=-1)
        return new_xyz, new_points_concat
Example #22
def pointnet_sa_module_msg_rand_tree_triples(xyz, points, npoint, radius_list, nsample_list, mlp_list, is_training, bn_decay, scope, nshuffles=1, bn=True, use_xyz=True, use_nchw=False):
    ''' PointNet Set Abstraction (SA) module with Multi-Scale Grouping (MSG)
        Input:
            xyz: (batch_size, ndataset, 3) TF tensor
            points: (batch_size, ndataset, channel) TF tensor
            npoint: int32 -- #points sampled in farthest point sampling
            radius: list of float32 -- search radius in local region
            nsample: list of int32 -- how many points in each local region
            mlp: list of list of int32 -- output size for MLP on each point
            use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
            use_nchw: bool, if True, use NCHW data format for conv2d, which is usually faster than NHWC format
        Return:
            new_xyz: (batch_size, npoint, 3) TF tensor
            new_points: (batch_size, npoint, \sum_k{mlp[k][-1]}) TF tensor
    '''
    data_format = 'NCHW' if use_nchw else 'NHWC'
    with tf.variable_scope(scope) as sc:
        new_xyz = gather_point(xyz, farthest_point_sample(npoint, xyz))
        new_points_list = []
        for i in range(len(radius_list)):
            radius = radius_list[i]
            nsample = nsample_list[i]
            idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)
            grouped_xyz = group_point(xyz, idx)
            grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2), [1,1,nsample,1])
            if points is not None:
                grouped_points = group_point(points, idx)
                if use_xyz:
                    grouped_points = tf.concat([grouped_points, grouped_xyz], axis=-1)
            else:
                grouped_points = grouped_xyz

            for j,num_out_channel in enumerate(mlp_list[i]):
                grouped_points_tr = tf.transpose(grouped_points, [2,0,1,3]) #bringing the points to the first axis
                point_idxs = np.arange(grouped_points_tr.get_shape()[0].value)
                point_idxs = np.resize(point_idxs, (grouped_points_tr.get_shape()[0].value * nshuffles,))
                point_idxs = tf.random.shuffle(point_idxs)
                grouped_points_tr = tf.gather(grouped_points_tr, point_idxs)
                grouped_points = tf.transpose(grouped_points_tr, [1,2,0,3])

                if use_nchw: grouped_points = tf.transpose(grouped_points, [0,3,1,2])

                grouped_points = tf_util.conv2d(grouped_points, num_out_channel, [1,3],
                                                padding='VALID', stride=[1,3], bn=bn, is_training=is_training,
                                                scope='conv%d_%d'%(i,j), bn_decay=bn_decay)

            if use_nchw: grouped_points = tf.transpose(grouped_points, [0,2,3,1])
            
            new_points = tf.squeeze(grouped_points, [2])
            new_points_list.append(new_points)
        new_points_concat = tf.concat(new_points_list, axis=-1)
        return new_xyz, new_points_concat
Example #23
def pointnet_sa_module_msg_bkup(xyz, points, npoint, radius_list, nsample_list,\
                                mlp_list, is_training, bn_decay, scope, bn=True, \
                                use_xyz=True, use_nchw=False):
    ''' pointnet set abstraction (sa) module with multi-scale grouping (msg)
        input:
            xyz: (batch_size, ndataset, 3) tf tensor
            points: (batch_size, ndataset, channel) tf tensor
            npoint: int32 -- #points sampled in farthest point sampling
            radius: list of float32 -- search radius in local region
            nsample: list of int32 -- how many points in each local region
            mlp: list of list of int32 -- output size for mlp on each point
            use_xyz: bool, if true concat xyz with local point features, otherwise just use point features
            use_nchw: bool, if true, use nchw data format for conv2d, which is usually faster than nhwc format
        return:
            new_xyz: (batch_size, npoint, 3) tf tensor
            new_points: (batch_size, npoint, \sum_k{mlp[k][-1]}) tf tensor
    '''
    data_format = 'NCHW' if use_nchw else 'NHWC'
    with tf.variable_scope(scope) as sc:
        new_xyz = gather_point(xyz, farthest_point_sample(npoint, xyz))
        new_points_list = []
        for i in range(len(radius_list)):
            radius = radius_list[i]
            nsample = nsample_list[i]
            idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)
            grouped_xyz = group_point(xyz, idx)
            grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2),
                                   [1, 1, nsample, 1])
            if points is not None:
                grouped_points = group_point(points, idx)
                if use_xyz:
                    grouped_points = tf.concat([grouped_points, grouped_xyz],
                                               axis=-1)
            else:
                grouped_points = grouped_xyz
            if use_nchw:
                grouped_points = tf.transpose(grouped_points, [0, 3, 1, 2])
            for j, num_out_channel in enumerate(mlp_list[i]):
                grouped_points = tf_util.conv2d(grouped_points,
                                                num_out_channel, [1, 1],
                                                padding='VALID',
                                                stride=[1, 1],
                                                bn=bn,
                                                is_training=is_training,
                                                scope='conv%d_%d' % (i, j),
                                                bn_decay=bn_decay)
            if use_nchw:
                grouped_points = tf.transpose(grouped_points, [0, 2, 3, 1])
            new_points = tf.reduce_max(grouped_points, axis=[2])
            new_points_list.append(new_points)
        new_points_concat = tf.concat(new_points_list, axis=-1)
        return new_xyz, new_points_concat
Example #24
def sample_and_group(npoint,
                     radius,
                     nsample,
                     xyz,
                     points,
                     knn=False,
                     use_xyz=True):
    '''
    Input:
        npoint: int32
        radius: float32
        nsample: int32
        xyz: (batch_size, ndataset, 3) TF tensor
        points: (batch_size, ndataset, channel) TF tensor, if None will just use xyz as points
        knn: bool, if True use kNN instead of radius search
        use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
    Output:
        new_xyz: (batch_size, npoint, 3) TF tensor
        new_points: (batch_size, npoint, nsample, 3+channel) TF tensor
        idx: (batch_size, npoint, nsample) TF tensor, indices of local points as in ndataset points
        grouped_xyz: (batch_size, npoint, nsample, 3) TF tensor, normalized point XYZs
            (subtracted by seed point XYZ) in local regions
        sampled_idx: (batch_size, npoint) TF tensor, indices of the sampled points (computed internally, not returned)
    '''

    point_cloud_shape = points.get_shape()
    batch_size = point_cloud_shape[0].value
    sampled_idx = farthest_point_sample(npoint, xyz)
    # sampled_idx = tf.random_uniform(shape=(batch_size,npoint),maxval=npoint-1,dtype=tf.int32)

    new_xyz = gather_point(xyz, sampled_idx)  # (batch_size, npoint, 3)
    if knn:
        _, idx = knn_point(nsample, xyz, new_xyz)
    else:
        idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)
    grouped_xyz = group_point(xyz, idx)  # (batch_size, npoint, nsample, 3)
    grouped_xyz -= tf.expand_dims(new_xyz, 2)  # translation normalization
    if points is not None:
        # grouped_points = group_point(points, idx) # (batch_size, npoint, nsample, channel)
        # print("grouped_points:", grouped_points.shape)
        # grouping:
        grouped_points = new_group_point(
            points, idx)  # (batch_size, npoint, nsample, channel)

        print("grouped_points:", grouped_points.shape)
        new_points = grouped_points
    else:
        new_points = grouped_xyz

    print("[Group] points:", new_points.shape)
    return new_xyz, new_points, idx, grouped_xyz
Example #25
def sample_and_group(npoint,
                     radius,
                     nsample,
                     xyz,
                     points,
                     knn=False,
                     use_xyz=True):
    '''
    Input:
        npoint: int32
        radius: float32
        nsample: int32
        xyz: (batch_size, ndataset, 3) TF tensor
        points: (batch_size, ndataset, channel) TF tensor, if None will just use xyz as points
        knn: bool, if True use kNN instead of radius search
        use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
    Output:
        new_xyz: (batch_size, npoint, 3) TF tensor
        new_points: (batch_size, npoint, nsample, 3+channel) TF tensor
        idx: (batch_size, npoint, nsample) TF tensor, indices of local points as in ndataset points
        grouped_xyz: (batch_size, npoint, nsample, 3) TF tensor, normalized point XYZs
            (subtracted by seed point XYZ) in local regions
    '''
    #tf_ops/samples/tf_sampling.py
    new_xyz = gather_point(xyz, farthest_point_sample(
        npoint, xyz))  # (batch_size, npoint, 3), select the 512 sampled points
    if knn:
        _, idx = knn_point(nsample, xyz, new_xyz)
    else:
        idx, pts_cnt = query_ball_point(radius, nsample, xyz,
                                        new_xyz)  # for each of the 512 sampled points, the indices of its 32 neighbours
    grouped_xyz = group_point(
        xyz, idx)  # (batch_size, npoint, nsample, 3), gather the 32 neighbour coordinates of each of the 512 points
    grouped_xyz -= tf.tile(
        tf.expand_dims(new_xyz, 2), [1, 1, nsample, 1]
    )  # translation normalization: expand (bs,512,3)->(bs,512,1,3), tile the third dim 32x ->(bs,512,32,3), assuming npoint=512 and nsample=32
    if points is not None:
        grouped_points = group_point(
            points, idx)  # (batch_size, npoint, nsample, channel)
        if use_xyz:  # whether to keep the raw XYZ spatial information
            new_points = tf.concat(
                [grouped_xyz, grouped_points],
                axis=-1)  # (batch_size, npoint, nsample, 3+channel)
        else:
            new_points = grouped_points
    else:
        new_points = grouped_xyz

    return new_xyz, new_points, idx, grouped_xyz
def sample_and_group(npoint, radius, nsample, xyz, points, knn=False,
                     use_xyz=True):
    '''
    Input:
        npoint: int32 = 1024
        radius: float32 = 0.5,1,2,4
        nsample: int32 = 16
        xyz: (batch_size, ndataset, 3) TF tensor
        points: (batch_size, ndataset, channel) TF tensor, if None will just use xyz as points
        knn: bool, if True use kNN instead of radius search
        use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
    Output:
        new_xyz: (batch_size, npoint, 3) TF tensor
        new_points: (batch_size, npoint, nsample, 3+channel) TF tensor
        idx: (batch_size, npoint, nsample) TF tensor, indices of local points as in ndataset points
        grouped_xyz: (batch_size, npoint, nsample, 3) TF tensor, normalized point XYZs
            (subtracted by seed point XYZ) in local regions
    '''
    new_xyz = gather_point(xyz, farthest_point_sample(npoint,
                                                      xyz))  ### Sampling using farthest point sampling
    # import ipdb; ipdb.set_trace()
    print ('check for seg fault')

    # xyz.shape
    # TensorShape([Dimension(4), Dimension(2048), Dimension(3)])
    # new_xyz.shape
    # TensorShape([Dimension(4), Dimension(1024), Dimension(3)])
    if knn:
        _, idx = knn_point(nsample, xyz, new_xyz)

    else:
        idx, pts_cnt = query_ball_point(radius, nsample, xyz,
                                        new_xyz)  ### Grouping using ball query
    grouped_xyz = group_point(xyz,
                              idx)  # (batch_size, npoint, nsample, 3)  ### Resulting grouped coordinates
    grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2),
                           [1, 1, nsample, 1])  ### translation normalization
    if points is not None:
        grouped_points = group_point(points,
                                     idx)  # (batch_size, npoint, nsample, channel)   ### Resulting grouped features
        if use_xyz:
            new_points = tf.concat([grouped_xyz, grouped_points],
                                   axis=-1)  # (batch_size, npoint, nsample, 3+channel)
        else:
            new_points = grouped_points
    else:
        new_points = grouped_xyz

    return new_xyz, new_points, idx, grouped_xyz
Example #27
    def test_grad(self):
        with tf.device('/gpu:0'):
            points = tf.constant(np.random.random((1, 128, 16)).astype('float32'))
            xyz1 = tf.constant(np.random.random((1, 128, 3)).astype('float32'))
            xyz2 = tf.constant(np.random.random((1, 8, 3)).astype('float32'))
            radius = 0.3
            nsample = 32
            idx, pts_cnt = query_ball_point(radius, nsample, xyz1, xyz2)
            grouped_points = group_point(points, idx)

        with self.test_session():
            print("---- Going to compute gradient error")
            err = tf.test.compute_gradient_error(points, (1, 128, 16), grouped_points, (1, 8, 32, 16))
            print(err)
            self.assertLess(err, 1e-4)
def softmax_embedding(xyz1, xyz2, feat1, feat2,
                      radius, nsample,
                      mlp,
                      is_training, bn_decay, scope,
                      bn=True, knn=True, corr_func='concat'):

    if knn:
        _, idx = knn_point(nsample, xyz2, xyz1)
    else:
        idx, _ = query_ball_point(radius, nsample, xyz2, xyz1)
    xyz2_grouped = group_point(xyz2, idx) # batch_size, npoint, nsample, 3
    xyz1_expanded = tf.expand_dims(xyz1, 2) # batch_size, npoint, 1, 3
    xyz_diff = xyz2_grouped - xyz1_expanded # batch_size, npoint, nsample, 3

    feat2_grouped = group_point(feat2, idx) # batch_size, npoint, nsample, channel
    feat1_expanded = tf.expand_dims(feat1, 2) # batch_size, npoint, 1, channel
    feat_diff = feat2_grouped - feat1_expanded

    feat_diff = tf.concat(axis=-1, values=[feat_diff, feat2_grouped, tf.tile(feat1_expanded,[1,1,nsample,1])]) # batch_size, npoint, nsample, channel*3


    feat1_new = tf.concat([feat_diff, xyz_diff], axis=3) # batch_size, npoint, nsample, channel*3 + 3
    # TODO: move scope to outer indent
    o = []  # collect the intermediate MLP outputs for inspection
    with tf.variable_scope(scope) as sc:
        for i, num_out_channel in enumerate(mlp):
            activation_fn = tf.nn.relu if i < len(mlp)-1 else None  # currently unused: the activation_fn argument is commented out in the conv2d call below
            feat1_new = tf_util.conv2d(feat1_new, num_out_channel, [1,1],
                                       padding='VALID', stride=[1,1],
                                       bn=True, is_training=is_training, #activation_fn=activation_fn,
                                       scope='conv_diff_%d'%(i), bn_decay=bn_decay)
            o.append(feat1_new)
    feat1_new += 0.00001  # small epsilon so the normalization below never divides by zero

    feat1_new = tf.squeeze(feat1_new, [3]) # batch_size, npoint1, nsample (requires the last MLP width to be 1)

    # normalize the per-neighbour scores linearly over the neighbourhood
    # (used here in place of the softmax commented out below)
    square = feat1_new  # tf.square(feat1_new)
    sm = square / tf.expand_dims(tf.reduce_sum(square, axis=-1), axis=-1)
    # sm = tf.nn.softmax(feat1_new) # batch_size, npoint1, nsample
    sm = tf.expand_dims(sm, axis=-1)

    flow_new = xyz_diff * sm # batch_size, npoint, nsample, 3
    flow_new = tf.reduce_sum(flow_new, axis=-2) # batch_size, npoint, 3

    # feat_new = feat2_grouped * sm # batch_size, npoint, nsample, channel
    # feat_new = tf.reduce_sum(feat_new, axis=-2) # batch_size, npoint, channel

    return xyz1, flow_new, sm, idx, o, feat_diff, xyz_diff
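# A minimal usage sketch for softmax_embedding (assumed shapes and MLP widths,
# not from the original snippet). The last MLP width is 1 so the squeeze over
# the channel axis above is valid; knn=True avoids having to pick a radius.
xyz1_in = tf.constant(np.random.random((2, 1024, 3)).astype('float32'))
xyz2_in = tf.constant(np.random.random((2, 1024, 3)).astype('float32'))
feat1_in = tf.constant(np.random.random((2, 1024, 32)).astype('float32'))
feat2_in = tf.constant(np.random.random((2, 1024, 32)).astype('float32'))
is_training_flag = tf.constant(True)
xyz1_out, flow, sm_out, idx_out, mlp_outs, feat_diff_out, xyz_diff_out = \
    softmax_embedding(xyz1_in, xyz2_in, feat1_in, feat2_in,
                      radius=1.0, nsample=16, mlp=[64, 64, 1],
                      is_training=is_training_flag, bn_decay=None,
                      scope='softmax_embedding_demo', knn=True)
# flow: (2, 1024, 3) -- per-point displacement as a soft average of xyz_diff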
Exemple #29
0
def sample_and_group(npoint, radius, nsample, xyz, points):
    new_xyz = gather_point(xyz, farthest_point_sample(
        npoint, xyz))  # (batch_size, npoint, 3)
    idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)
    grouped_xyz = group_point(xyz, idx)  # (batch_size, npoint, nsample, 3)
    grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2),
                           [1, 1, nsample, 1])  # translation normalization
    if points is not None:
        grouped_points = group_point(
            points, idx)  # (batch_size, npoint, nsample, channel)
        new_points = tf.concat(
            [grouped_xyz, grouped_points],
            axis=-1)  # (batch_size, npoint, nsample, 3+channel)
    else:
        new_points = grouped_xyz
    return new_xyz, new_points, idx, grouped_xyz
    def sample_and_group(self, xyz, points):
        '''
        Input:
            xyz: (batch_size, ndataset, 3) TF tensor
            points: (batch_size, ndataset, channel) TF tensor, if None will just use xyz as points
        Uses instance attributes:
            self.npoint: int32, self.radius: float32, self.nsample: int32
            self.knn: bool, if True use kNN instead of radius search
            self.use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
        Output:
            new_xyz: (batch_size, npoint, 3) TF tensor
            new_points: (batch_size, npoint, nsample, 3+channel) TF tensor
            idx: (batch_size, npoint, nsample) TF tensor, indices of local points as in ndataset points
            grouped_xyz: (batch_size, npoint, nsample, 3) TF tensor, normalized point XYZs
                (subtracted by seed point XYZ) in local regions
        '''
        # farthest-point-sample self.npoint seed points from each batch
        # new_xyz: b * npoint * 3
        new_xyz = gather_point(xyz, farthest_point_sample(
            self.npoint, xyz))  # (batch_size, npoint, 3)
        if self.knn:
            _, idx = knn_point(self.nsample, xyz, new_xyz)
        else:
            # idx: (batch_size, npoint, nsample) int32 array, indices to input points
            # pts_cnt: (batch_size, npoint) int32 array, number of unique points in each local region
            idx, pts_cnt = query_ball_point(self.radius, self.nsample, xyz,
                                            new_xyz)
        # grouped_xyz: (batch_size, npoint, nsample, 3)
        # gather the coordinates of each local region according to idx
        grouped_xyz = group_point(xyz, idx)  # (batch_size, npoint, nsample, 3)
        # move the points to the local origin (by subtracting the coordinate of the seed point)
        grouped_xyz -= tf.tile(tf.expand_dims(
            new_xyz, 2), [1, 1, self.nsample, 1])  # translation normalization
        if points is not None:
            grouped_points = group_point(
                points, idx)  # (batch_size, npoint, nsample, channel)
            if self.use_xyz:
                new_points = tf.concat(
                    [grouped_xyz, grouped_points],
                    axis=-1)  # (batch_size, npoint, nsample, 3+channel)
            else:
                new_points = grouped_points
        else:
            new_points = grouped_xyz

        return new_xyz, new_points, idx, grouped_xyz
  def test_grad(self):
    with tf.device('/gpu:0'):
      points = tf.constant(np.random.random((1,128,16)).astype('float32'))
      print(points)
      xyz1 = tf.constant(np.random.random((1,128,3)).astype('float32'))
      xyz2 = tf.constant(np.random.random((1,8,3)).astype('float32'))
      radius = 0.3 
      nsample = 32
      idx, pts_cnt = query_ball_point(radius, nsample, xyz1, xyz2)
      grouped_points = group_point(points, idx)
      print(grouped_points)

    with self.test_session():
      print("---- Going to compute gradient error")
      err = tf.test.compute_gradient_error(points, (1,128,16), grouped_points, (1,8,32,16))
      print(err)
      self.assertLess(err, 1e-4) 
Exemple #32
0
def get_repulsion_loss4(pred, nsample=20, radius=0.07):
    # pred: (batch_size, npoint,3)
    idx, pts_cnt = query_ball_point(radius, nsample, pred, pred)
    tf.summary.histogram('smooth/unique_index', pts_cnt)

    grouped_pred = group_point(pred, idx)  # (batch_size, npoint, nsample, 3)
    grouped_pred -= tf.expand_dims(pred, 2)

    ## get the uniform (repulsion) loss
    h = 0.03
    dist_square = tf.reduce_sum(grouped_pred**2, axis=-1)
    dist_square, idx = tf.nn.top_k(-dist_square, 5)
    dist_square = -dist_square[:, :, 1:]  # remove the first one
    dist_square = tf.maximum(1e-12, dist_square)
    dist = tf.sqrt(dist_square)
    weight = tf.exp(-dist_square / h**2)
    uniform_loss = tf.reduce_mean(radius - dist * weight)
    return uniform_loss
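# A minimal usage sketch (placeholder prediction and an assumed loss weight,
# not from the original snippet): the repulsion term can be added to a task
# loss to push generated points apart.
pred_pts = tf.constant(np.random.random((4, 4096, 3)).astype('float32'))
repulsion = get_repulsion_loss4(pred_pts, nsample=20, radius=0.07)
# total_loss = task_loss + 0.01 * repulsion  # 0.01 is an assumed weight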
def pointnet_sa_module_msg(xyz, points, npoint, radius_list, nsample_list, mlp_list, is_training, bn_decay, scope, bn=True, use_xyz=True, use_nchw=False):
    ''' PointNet Set Abstraction (SA) module with Multi-Scale Grouping (MSG)
        Input:
            xyz: (batch_size, ndataset, 3) TF tensor
            points: (batch_size, ndataset, channel) TF tensor
            npoint: int32 -- #points sampled in farthest point sampling
            radius_list: list of float32 -- search radii for the local regions, one per scale
            nsample_list: list of int32 -- how many points in each local region, one per scale
            mlp_list: list of list of int32 -- output sizes for the MLP on each point, one per scale
            use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
            use_nchw: bool, if True, use NCHW data format for conv2d, which is usually faster than NHWC format
        Return:
            new_xyz: (batch_size, npoint, 3) TF tensor
            new_points: (batch_size, npoint, \sum_k{mlp_list[k][-1]}) TF tensor
    '''
    data_format = 'NCHW' if use_nchw else 'NHWC'
    with tf.variable_scope(scope) as sc:
        new_xyz = gather_point(xyz, farthest_point_sample(npoint, xyz))
        new_points_list = []
        for i in range(len(radius_list)):
            radius = radius_list[i]
            nsample = nsample_list[i]
            idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)
            grouped_xyz = group_point(xyz, idx)
            grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2), [1,1,nsample,1])
            if points is not None:
                grouped_points = group_point(points, idx)
                if use_xyz:
                    grouped_points = tf.concat([grouped_points, grouped_xyz], axis=-1)
            else:
                grouped_points = grouped_xyz
            if use_nchw: grouped_points = tf.transpose(grouped_points, [0,3,1,2])
            for j,num_out_channel in enumerate(mlp_list[i]):
                grouped_points = tf_util.conv2d(grouped_points, num_out_channel, [1,1],
                                                padding='VALID', stride=[1,1], bn=bn, is_training=is_training,
                                                scope='conv%d_%d'%(i,j), bn_decay=bn_decay)
            if use_nchw: grouped_points = tf.transpose(grouped_points, [0,2,3,1])
            new_points = tf.reduce_max(grouped_points, axis=[2])
            new_points_list.append(new_points)
        new_points_concat = tf.concat(new_points_list, axis=-1)
        return new_xyz, new_points_concat
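# A minimal usage sketch (assumed shapes and hyperparameters, not from the
# original snippet): two grouping scales around 512 centroids; per-scale
# features are max-pooled and concatenated along the channel axis.
xyz_l0 = tf.constant(np.random.random((8, 1024, 3)).astype('float32'))
feats_l0 = tf.constant(np.random.random((8, 1024, 32)).astype('float32'))
is_training_msg = tf.constant(True)
new_xyz_l1, new_points_l1 = pointnet_sa_module_msg(
    xyz_l0, feats_l0, npoint=512,
    radius_list=[0.1, 0.2], nsample_list=[16, 32],
    mlp_list=[[32, 32, 64], [64, 64, 128]],
    is_training=is_training_msg, bn_decay=None, scope='sa_msg_demo')
# new_points_l1: (8, 512, 64 + 128) -- concatenation of the per-scale features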