Example #1
 def test(self):
     np.random.seed(100)
     triangles = np.random.rand(1, 5, 3, 3).astype("float32")
     with tf.device("/gpu:0"):
         inp = tf.constant(triangles)
         tria = inp[:, :, 0, :]
         trib = inp[:, :, 1, :]
         tric = inp[:, :, 2, :]
         areas = tf.sqrt(
             tf.reduce_sum(tf.linalg.cross(trib - tria, tric -
                                           tria)**2, 2) + 1e-9)
         randomnumbers = tf.random.uniform((1, 8192))
         triids = prob_sample(areas, randomnumbers)
         tria_sample = gather_point(tria, triids)
         trib_sample = gather_point(trib, triids)
         tric_sample = gather_point(tric, triids)
         us = tf.random.uniform((1, 8192))
         vs = tf.random.uniform((1, 8192))
         uplusv = 1 - tf.abs(us + vs - 1)
         uminusv = us - vs
         us = (uplusv + uminusv) * 0.5
         vs = (uplusv - uminusv) * 0.5
         pt_sample = (tria_sample +
                      (trib_sample - tria_sample) * tf.expand_dims(us, -1) +
                      (tric_sample - tria_sample) * tf.expand_dims(vs, -1))
         print("pt_sample: ", pt_sample)
         reduced_sample = gather_point(
             pt_sample, farthest_point_sample(1024, pt_sample))
         print(reduced_sample)
     with tf.compat.v1.Session("") as sess:
         ret = sess.run(reduced_sample)
     print(ret.shape, ret.dtype)
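
The test above samples points uniformly on the mesh surface: prob_sample draws triangle ids with probability proportional to triangle area, and the us/vs folding turns two uniform variables into barycentric coordinates inside each triangle. As a point of reference, here is a minimal NumPy sketch of the same area-weighted surface sampling that does not rely on the compiled ops; the helper name and signature are illustrative only.

import numpy as np

def sample_on_triangles(triangles, n_samples, rng=np.random.default_rng(0)):
    # triangles: (T, 3, 3) vertex coordinates; returns (n_samples, 3) surface points
    a, b, c = triangles[:, 0], triangles[:, 1], triangles[:, 2]
    areas = 0.5 * np.linalg.norm(np.cross(b - a, c - a), axis=1)             # triangle areas from the cross product
    ids = rng.choice(len(triangles), size=n_samples, p=areas / areas.sum())  # prob_sample analogue
    us, vs = rng.random(n_samples), rng.random(n_samples)
    uplusv, uminusv = 1 - np.abs(us + vs - 1), us - vs                       # fold the unit square onto the triangle
    us, vs = (uplusv + uminusv) * 0.5, (uplusv - uminusv) * 0.5
    return a[ids] + (b[ids] - a[ids]) * us[:, None] + (c[ids] - a[ids]) * vs[:, None]
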
def sampling_with_boundary_label(npoint, pts, labels):

    labels = tf.tile(tf.expand_dims(labels, -1), [1, 1, 3])
    idx = tf_sampling.farthest_point_sample(npoint, pts)
    sub_pts = tf_sampling.gather_point(pts, idx)
    sub_labels = tf_sampling.gather_point(labels, idx)

    return sub_pts, sub_labels[:, :, 0]
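
The labels are tiled to 3 channels above only because gather_point expects a (B, N, 3) float tensor. On a TF build that supports batch_dims, the same label gather can be written without the padding; a hedged sketch (the helper name is made up):

import tensorflow as tf

def gather_labels(labels, idx):
    # labels: (B, N) tensor, idx: (B, npoint) int32 -> (B, npoint) sampled labels
    return tf.gather(labels, idx, batch_dims=1)
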
def sample_and_group(npoint,
                     radius,
                     nsample,
                     xyz,
                     points,
                     tnet_spec=None,
                     knn=False,
                     use_xyz=True,
                     centralize_points=False):
    '''
    Input:
        npoint: int32
        radius: float32
        nsample: int32
        xyz: (batch_size, ndataset, 3) TF tensor
        points: (batch_size, ndataset, channel) TF tensor, if None will just use xyz as points
        tnet_spec: dict (keys: mlp, mlp2, is_training, bn_decay), if None do not apply tnet
        knn: bool, if True use kNN instead of radius search
        use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
        centralize_points: bool, if True subtract each seed point's first 3 feature channels from the grouped point features
    Output:
        new_xyz: (batch_size, npoint, 3) TF tensor
        new_points: (batch_size, npoint, nsample, 3+channel) TF tensor
        idx: (batch_size, npoint, nsample) TF tensor, indices of local points as in ndataset points
        grouped_xyz: (batch_size, npoint, nsample, 3) TF tensor, normalized point XYZs
            (subtracted by seed point XYZ) in local regions
    '''

    fpsidx = farthest_point_sample(npoint, xyz)
    new_xyz = gather_point(xyz, fpsidx)  # (batch_size, npoint, 3)
    if knn:
        _, idx = knn_point(nsample, xyz, new_xyz)
    else:
        idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)
    grouped_xyz = group_point(xyz, idx)  # (batch_size, npoint, nsample, 3)
    grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2),
                           [1, 1, nsample, 1])  # translation normalization
    if tnet_spec is not None:
        grouped_xyz = tnet(grouped_xyz, tnet_spec)
    if points is not None:
        grouped_points = group_point(
            points, idx)  # (batch_size, npoint, nsample, channel)

        if centralize_points:
            central_points = gather_point(points[:, :, :3], fpsidx)
            grouped_points = tf.concat((grouped_points[:, :, :, :3] - tf.tile(
                tf.expand_dims(central_points, 2), [1, 1, nsample, 1]),
                                        grouped_points[:, :, :, 3:]), -1)

        if use_xyz:
            new_points = tf.concat(
                [grouped_xyz, grouped_points],
                axis=-1)  # (batch_size, npoint, nsample, 3+channel)
        else:
            new_points = grouped_points
    else:
        new_points = grouped_xyz

    return new_xyz, new_points, idx, grouped_xyz
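
query_ball_point is a compiled CUDA op, so its output convention is easy to miss when reading the code above. A single-cloud NumPy sketch of what it computes, assuming (as I read the original op) that unused slots in a row are padded with the first in-ball index:

import numpy as np

def query_ball_point_np(radius, nsample, xyz, new_xyz):
    # xyz: (N, 3), new_xyz: (M, 3) -> idx: (M, nsample), cnt: (M,) points found per ball
    d = np.linalg.norm(new_xyz[:, None, :] - xyz[None, :, :], axis=-1)
    idx = np.zeros((len(new_xyz), nsample), dtype=np.int64)
    cnt = np.zeros(len(new_xyz), dtype=np.int64)
    for q in range(len(new_xyz)):
        inball = np.flatnonzero(d[q] < radius)[:nsample]
        cnt[q] = len(inball)
        if len(inball):
            idx[q] = inball[0]              # pad every slot with the first hit
            idx[q, :len(inball)] = inball   # then overwrite with the real neighbours
    return idx, cnt
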
def sample_and_group(npoint, radius, nsample, xyz, points, knn=False, use_xyz=True):
    '''
    Input:
        npoint: int32
        radius: float32
        nsample: int32
        xyz: (batch_size, ndataset, 3) TF tensor
        points: (batch_size, ndataset, channel) TF tensor, if None will just use xyz as points
        knn: bool, if True use kNN instead of radius search
        use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
    Output:
        new_xyz: (batch_size, npoint, 3) TF tensor
        new_points: (batch_size, npoint, nsample, 3+channel) TF tensor
        idx: (batch_size, npoint, nsample) TF tensor, indices of local points as in ndataset points
        grouped_xyz: (batch_size, npoint, nsample, 3) TF tensor, normalized point XYZs
            (subtracted by seed point XYZ) in local regions
    '''

    new_xyz = gather_point(xyz, farthest_point_sample(npoint, xyz)) # (batch_size, npoint, 3)
    if knn:
        _,idx = knn_point(nsample, xyz, new_xyz)
    else:
        idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)
    grouped_xyz = group_point(xyz, idx) # (batch_size, npoint, nsample, 3)
    grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2), [1,1,nsample,1]) # translation normalization
    if points is not None:
        grouped_points = group_point(points, idx) # (batch_size, npoint, nsample, channel)
        if use_xyz:
            new_points = tf.concat([grouped_xyz, grouped_points], axis=-1) # (batch_size, npoint, nsample, 3+channel)
        else:
            new_points = grouped_points
    else:
        new_points = grouped_xyz

    return new_xyz, new_points, idx, grouped_xyz
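
Both variants of sample_and_group start from farthest_point_sample, another compiled op. Its behaviour is easy to restate: greedily add the point farthest from everything selected so far. A single-cloud NumPy reference sketch (the compiled op may pick its first seed differently):

import numpy as np

def farthest_point_sample_np(npoint, xyz, start=0):
    # xyz: (N, 3) -> npoint indices into xyz
    n = xyz.shape[0]
    idx = np.empty(npoint, dtype=np.int64)
    dist = np.full(n, np.inf)
    idx[0] = start
    for i in range(1, npoint):
        # distance of every point to its closest already-selected point
        dist = np.minimum(dist, np.sum((xyz - xyz[idx[i - 1]]) ** 2, axis=1))
        idx[i] = np.argmax(dist)
    return idx
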
Example #5
 def call(self, x):
     '''
     Input: List
         xyz : (batch, n_inputs, 3)
         features : (batch, n_inputs, channels)
     Output: List
         new_xyz: (batch_size, n_centroids, 3) TF tensor
         new_points: (batch_size, n_centroids, n_samples, 3+channel) TF tensor
         centroid_idx: (batch_size, n_centroids) TF tensor, indices of centroid
         grouped_xyz: (batch_size, n_centroids, n_samples, 3) TF tensor, normalized point XYZs
     '''
     xyz, features = x
     if self.random:
         centroid_idx = random_sample(self.n_centroid, xyz)
     else:
         centroid_idx = farthest_point_sample(self.n_centroid, xyz)
     new_xyz = gather_point(xyz, centroid_idx)  # (batch, n_centroid, 3)
     idx, _ = query_ball_point(self.radius, self.n_samples, xyz, new_xyz)
     grouped_xyz = group_point(
         xyz, idx)  # (batch_size, n_centroids, n_sample, 3)
     grouped_xyz -= tf.tile(
         tf.expand_dims(new_xyz, 2),
         [1, 1, self.n_samples, 1])  # translation normalization
     grouped_xyz /= self.radius  # normalize xyz w.r.t the radius
     if self.use_feature:  # can't use None type here
         grouped_points = group_point(
             features, idx)  # (batch_size, n_centroid, n_samples, channels)
         if self.use_xyz:
             new_points = tf.concat([grouped_xyz, grouped_points], axis=-1)
             # (batch_size, n_centroid, n_samples, channels + 3)
         else:
             new_points = grouped_points
     else:
         new_points = grouped_xyz
     return [new_xyz, new_points, centroid_idx, grouped_xyz]
Example #6
def get_model(sample_num, sample_scale, point_cloud, is_training, filter_sizes, filter_num, bn_decay=None):
    '''
    Input: 
        sample_num: int32; sample M points from the original N points.
        sample_scale: list of int; for each scale Ki, group the Ki nearest neighbours of every sampled point
    '''
    batch_size = point_cloud.get_shape()[0].value
    feature_collection = []
    channels = [32, 64, 128]
    M_sampled_points = farthest_point_sample(sample_num, point_cloud)
    # [batch, sample_num, 3]
    new_xyz = gather_point(point_cloud, M_sampled_points)
    for i, scale in enumerate(sample_scale):
        # [batch, sample_num, scale]
        _, idx = knn_point(scale, point_cloud, new_xyz)
        # [batch, sample_num, scale, 3]
        points_features = group_point(point_cloud, idx)
        for j, channel in enumerate(channels):
            # [batch, sample_num, scale, channel]
            points_features = tf_util.conv2d(points_features, channel, [1, 1],
                                             padding = 'VALID', stride = [1, 1],
                                             bn = True, is_training = is_training,
                                             scope='conv_%d_%d'%(i, j), bn_decay = bn_decay,
                                             data_format = 'NHWC')
        # [batch, sample_num, 1, 128]
        points_features = tf.reduce_max(points_features, axis = [2], keep_dims = True, name = 'maxpool')
        # [batch, sample_num, 128]
        points_features = tf.squeeze(points_features, [2])
        # [batch, sample_num, 1, 128]
        points_features = tf.expand_dims(points_features, 2)
        # [batch * sample_num, 1, 128]
        points_features = tf.reshape(points_features, [batch_size * sample_num, 1, channels[-1]])
        feature_collection.append(points_features)

    # [batch * sample_num, len(sample_scale), 128]
    textcnn_embedding = tf.concat(feature_collection, 1)
    # [batch * sample_num, feature_size = 128]
    textcnn_encoded = get_textcnn_model(textcnn_embedding, filter_sizes, filter_num, is_training, bn_decay)
    # [batch, sample_num, feature_size]
    textcnn_encoded = tf.reshape(textcnn_encoded, [batch_size, sample_num, -1])
    # [batch, sample_num, 1, feature_size]
    global_feature = tf.expand_dims(textcnn_encoded, 2)
    channels = [256, 512, 1024]
    for i, channel in enumerate(channels):
        # [batch, sample_num, 1, channel]
        global_feature = tf_util.conv2d(global_feature, channel, [1, 1],
                                        padding = 'VALID', stride = [1, 1],
                                        bn = True, is_training = is_training,
                                        scope='feature_aggregation_conv_%d'%(i), bn_decay = bn_decay,
                                        data_format = 'NHWC')
    # [batch, 1, 1, 1024]
    global_feature = tf.reduce_max(global_feature, axis = [1], keep_dims = True, name = 'global_feature_maxpool')
    # [batch, 1024]
    global_feature = tf.reshape(global_feature, [batch_size, -1])
    classify_feature = tf_util.fully_connected(global_feature, 512, bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
    classify_feature = tf_util.dropout(classify_feature, keep_prob=0.4, is_training=is_training, scope='dp1')
    classify_feature = tf_util.fully_connected(classify_feature, 256, bn=True, is_training=is_training, scope='fc2', bn_decay=bn_decay)
    classify_feature = tf_util.dropout(classify_feature, keep_prob=0.4, is_training=is_training, scope='dp2')
    classify_feature = tf_util.fully_connected(classify_feature, 40, activation_fn=None, scope='fc3')
    return classify_feature
Example #7
def pc_sampling(xyz, feat, nsample, num_point, scope='sampling'):
    """ Fully connected layer with non-linear operation.
  
  Args:
    xyz: 3-D tensor B x N x 3
    nsample: k
    num_point: N2
    feat: 3-D tensor B x N x C
  
  Returns:
    feat_sample: 3-D tensor B x N2 x C
    xyz_new: 3-D tensor B x N2 x 3
  """
    with tf.variable_scope(scope) as sc:
        xyz_new = gather_point(xyz, farthest_point_sample(num_point, xyz))
        _, idx_pooling = knn_point(nsample, xyz, xyz_new)

        grouped_points = group_point(feat, idx_pooling)
        feat_sample = tf.nn.max_pool(grouped_points, [1, 1, nsample, 1],
                                     [1, 1, 1, 1],
                                     padding='VALID',
                                     data_format='NHWC',
                                     name="MAX_POOLING")
        feat_sample = tf.squeeze(feat_sample, axis=[2])

        return feat_sample, xyz_new
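
knn_point can likewise be expressed with stock TensorFlow when the compiled ops are unavailable; a sketch using a dense pairwise distance matrix and tf.nn.top_k (fine for small N, memory-hungry for large clouds):

import tensorflow as tf

def knn_point_tf(k, xyz, new_xyz):
    # xyz: (B, N, 3), new_xyz: (B, M, 3) -> squared distances and indices, both (B, M, k)
    sq_d = tf.reduce_sum(tf.square(tf.expand_dims(new_xyz, 2) - tf.expand_dims(xyz, 1)), axis=-1)
    neg_d, idx = tf.nn.top_k(-sq_d, k=k)   # top_k of the negated distances = the k smallest
    return -neg_d, idx
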
def sample_and_group(npoint, radius, nsample, xyz, points, knn=False, use_xyz=True):
    ''' New sample_and_group with Fully Delayed-Aggregation
    Input:
        npoint: int32
        radius: float32
        nsample: int32
        xyz: (batch_size, ndataset, 3) TF tensor
        points: (batch_size, ndataset, channel) TF tensor, if None will just use xyz as points
        knn: bool, if True use kNN instead of radius search
        use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
    Output:
        new_xyz: (batch_size, npoint, 3) TF tensor
        new_points: (batch_size, npoint, nsample, 3+channel) TF tensor
        idx: (batch_size, npoint, nsample) TF tensor, indices of local points as in ndataset points
        grouped_xyz: (batch_size, npoint, nsample, 3) TF tensor, normalized point XYZs
            (subtracted by seed point XYZ) in local regions
    '''
    point_cloud_shape = points.get_shape()
    batch_size = point_cloud_shape[0].value
    num_points = point_cloud_shape[1].value
    num_dims = point_cloud_shape[-1].value
    
    # get the index and coordinates of sampled points
    sampled_idx = tf.random_uniform(shape=(batch_size, npoint),maxval=npoint-1,dtype=tf.int32)
    new_xyz = gather_point(xyz, sampled_idx) # (batch_size, npoint, 3)
    
    if knn:
        _,idx = knn_point(nsample, xyz, new_xyz)
    else:
        idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)

    # grouping:
    idx_ = tf.range(batch_size) * num_points
    idx_ = tf.reshape(idx_, [batch_size, 1, 1])

    points = tf.reshape(points, [-1, num_dims])
    new_points = tf.gather(points, idx + idx_)
    
    # get the sampled points as centroids with xyz+feature for coord correction
    sampled_idx = tf.expand_dims(sampled_idx, -1)
    sampled_points = tf.gather(points, sampled_idx + idx_)

    # coord correction
    new_points -= sampled_points

    # get the new xyz set for sampled points and neighbors
    xyz_shape = xyz.get_shape()
    batch_size = xyz_shape[0].value
    num_points = xyz_shape[1].value
    num_dims = xyz_shape[-1].value

    idx_ = tf.range(batch_size) * num_points
    idx_ = tf.reshape(idx_, [batch_size, 1, 1])
    
    xyz_reshaped = tf.reshape(xyz, [-1, num_dims])
    grouped_xyz = tf.gather(xyz_reshaped, idx + idx_)
  
    grouped_xyz -= tf.expand_dims(new_xyz, 2) # translation normalization

    return new_xyz, new_points, idx, grouped_xyz
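
The delayed-aggregation version above replaces group_point with a flat tf.gather: the batch dimension is flattened and a per-batch offset (idx_) is added to the neighbour indices. The offset trick in isolation, as a NumPy sketch:

import numpy as np

def group_by_flat_gather(points, idx):
    # points: (B, N, C), idx: (B, M, K) per-batch indices -> (B, M, K, C), like group_point
    B, N, C = points.shape
    offset = (np.arange(B) * N).reshape(B, 1, 1)   # the idx_ tensor in the code above
    flat = points.reshape(-1, C)                   # (B*N, C)
    return flat[idx + offset]                      # fancy indexing restores (B, M, K, C)
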
Example #9
def sample_and_group(sample_pt_num, radius, neighbor_size, input_xyz, input_features):
    '''
    Input:
        sample_pt_num: how many points to keep
        radius: query ball radius
        neighbor_size: how many neighbor points
        input_xyz: (batch_size, npoints, 3)
        input_features: (batch_size, npoint, C)
    Output:
        sampled_xyz: (batch_size, sample_pt_num, 3)
        idx: (batch_size, sample_pt_num, neighbor_size)
        sampled_grouped_relation: (batch_size, sample_pt_num, neighbor_size, 10)
        sampled_grouped_features: (batch_size, sample_pt_num, neighbor_size, C)

    '''

    sampled_xyz = gather_point(input_xyz, farthest_point_sample(sample_pt_num, input_xyz))  # (batch_size, sample_pt_num, 3)
    idx, pts_cnt = query_ball_point(radius, neighbor_size, input_xyz, sampled_xyz)
    sampled_grouped_xyz = group_point(input_xyz, idx)  # (batch_size, sample_pt_num, neighbor_size, 3)
    sampled_grouped_features = group_point(input_features, idx)

    sampled_center_xyz = tf.tile(tf.expand_dims(sampled_xyz, 2), [1, 1, neighbor_size, 1])  # (batch_size, npoint, nsample, 3)

    euclidean = tf.reduce_sum(tf.square(sampled_grouped_xyz-sampled_center_xyz), axis=-1, keepdims=True)  # (batch_size, npoint, nsample, 1)
    sampled_grouped_relation = tf.concat([euclidean, sampled_center_xyz-sampled_grouped_xyz,
                                  sampled_center_xyz, sampled_grouped_xyz], axis=-1)  # (batch_size, npoint, nsample, 10)

    return sampled_xyz, idx, sampled_grouped_relation, sampled_grouped_features
Example #10
def decoder(inputs, features, step_ratio=16, num_fine=16 * 1024):
    num_coarse=1024
    assert num_fine == num_coarse * step_ratio
    with tf.variable_scope('decoder', reuse=tf.AUTO_REUSE):
        coarse = mlp(features, [1024, 1024, num_coarse * 3], bn=None, bn_params=None)
        coarse = tf.reshape(coarse, [-1, num_coarse, 3])

    p1_idx = farthest_point_sample(512, coarse)
    coarse_1 = gather_point(coarse, p1_idx)
    input_fps = symmetric_sample(inputs, int(512 / 2))
    coarse = tf.concat([input_fps, coarse_1], 1)

    with tf.variable_scope('folding', reuse=tf.AUTO_REUSE):
        if not step_ratio ** .5 % 1 == 0:
            grid = gen_1d_grid(step_ratio)
        else:
            grid = gen_grid(np.round(np.sqrt(step_ratio)).astype(np.int32))
        grid = tf.expand_dims(grid, 0)
        grid_feat = tf.tile(grid, [features.shape[0], num_coarse, 1])
        point_feat = tf.tile(tf.expand_dims(coarse, 2), [1, 1, step_ratio, 1])
        point_feat = tf.reshape(point_feat, [-1, num_fine, 3])
        global_feat = tf.tile(tf.expand_dims(features, 1), [1, num_fine, 1])
        feat = tf.concat([grid_feat, point_feat, global_feat], axis=2)
        fine = mlp_conv(feat, [512, 512, 3], bn=None, bn_params=None) + point_feat
    return coarse, fine
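
The folding step tiles a small 2-D grid per coarse point so that each coarse point expands into step_ratio fine points. The exact grid comes from gen_grid/gen_1d_grid, which are not shown here; a hypothetical stand-in just to make the shapes concrete (the real helpers may use a different extent):

import numpy as np

def gen_grid_np(n):
    # hypothetical: an n x n grid in [-0.05, 0.05]^2, flattened to (n*n, 2)
    xs = np.linspace(-0.05, 0.05, n)
    gx, gy = np.meshgrid(xs, xs)
    return np.stack([gx.ravel(), gy.ravel()], axis=1)

# e.g. step_ratio = 16 -> gen_grid_np(4) has shape (16, 2): one grid offset per fine point
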
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--folder', '-f', help='Path to data folder')
    args = parser.parse_args()

    root = args.folder if args.folder else os.path.join(ROOT_DIR, 'data', 'arch_hdf5_8196')

    folders = [os.path.join(root, folder) for folder in ['test', 'train']]

    for folder in folders:
        datasets = os.listdir(folder)
        for dataset_idx, dataset in enumerate(datasets):

            filename_h5 = os.path.join(folder, dataset)
            print('{}-Loading {}...'.format(datetime.now(), filename_h5))

            data, label_seg = load_h5_seg(filename_h5)
            assert(data.shape[1] == label_seg.shape[1])

            fps_idx = farthest_point_sample(4096, data)
            points_sampled = gather_point(data, fps_idx)  # (batch_size, 4096, 3)
            print(points_sampled.shape)

            # pair each sampled index with its batch index so the labels follow the sampled points
            batch_idx = tf.tile(tf.reshape(tf.range(data.shape[0], dtype=tf.int32), (-1, 1, 1)), [1, 4096, 1])
            indices = tf.concat([batch_idx, tf.expand_dims(fps_idx, -1)], axis=-1)
            labels_sampled = tf.gather_nd(label_seg, indices=indices, name='labels_sampled')
            print(labels_sampled.shape)

            # evaluate the graph before writing to disk
            with tf.Session() as sess:
                points_np, labels_np = sess.run([points_sampled, labels_sampled])

            sampled_filename = os.path.join(folder, dataset + '_4096_sampled.h5')
            file = h5py.File(sampled_filename, 'w')
            file.create_dataset('data', data=points_np)
            file.create_dataset('label_seg', data=labels_np)
            file.close()
def sample_and_group(npoint, radius, nsample, xyz, points, knn=False, use_xyz=True):
    '''
    Input:
        npoint: int32
        radius: float32
        nsample: int32
        xyz: (batch_size, ndataset, 3) TF tensor
        points: (batch_size, ndataset, channel) TF tensor, if None will just use xyz as points
        knn: bool, if True use kNN instead of radius search
        use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
    Output:
        new_xyz: (batch_size, npoint, 3) TF tensor
        new_points: (batch_size, npoint, nsample, 3+channel) TF tensor
        idx: (batch_size, npoint, nsample) TF tensor, indices of local points as in ndataset points
        grouped_xyz: (batch_size, npoint, nsample, 3) TF tensor, normalized point XYZs
            (subtracted by seed point XYZ) in local regions
    '''

    new_xyz = gather_point(xyz, farthest_point_sample(npoint, xyz)) # (batch_size, npoint, 3)
    if knn:
        _,idx = knn_point(nsample, xyz, new_xyz)
    else:
        idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)
    grouped_xyz = group_point(xyz, idx) # (batch_size, npoint, nsample, 3)
    grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2), [1,1,nsample,1]) # translation normalization
    if points is not None:
        grouped_points = group_point(points, idx) # (batch_size, npoint, nsample, channel)
        if use_xyz:
            new_points = tf.concat([grouped_xyz, grouped_points], axis=-1) # (batch_size, npoint, nsample, 3+channel)
        else:
            new_points = grouped_points
    else:
        new_points = grouped_xyz

    return new_xyz, new_points, idx, grouped_xyz
Example #13
def get_Geometric_Loss1(predictedPts, targetpoints, FLAGS):

    gen_points = FLAGS.generate_num
    targetpoints = gather_point(targetpoints,
                                farthest_point_sample(
                                    gen_points,
                                    targetpoints))  # resample targetpoints down to the number of generated points
    # calculate shape loss
    square_dist = pairwise_l2_norm2_batch(targetpoints, predictedPts)
    dist = tf.sqrt(square_dist)  # take the square root
    minRow = tf.reduce_min(dist, axis=2)  ## min over the predicted-point dim (for each target point)
    minCol = tf.reduce_min(dist, axis=1)  ## min over the target-point dim (for each predicted point)
    shapeLoss = tf.reduce_mean(minRow) + tf.reduce_mean(
        minCol)  ## average over [batch_size, points]

    # calculate density loss
    square_dist2 = pairwise_l2_norm2_batch(targetpoints, targetpoints)
    dist2 = tf.sqrt(square_dist2)
    knndis = tf.nn.top_k(tf.negative(dist), k=FLAGS.nnk)  # the k (FLAGS.nnk) nearest neighbours per row, rows indexed by target points
    knndis2 = tf.nn.top_k(tf.negative(dist2), k=FLAGS.nnk)
    densityLoss1 = tf.reduce_mean(tf.abs(knndis.values - knndis2.values))

    # density loss on source shape
    square_dist3 = pairwise_l2_norm2_batch(predictedPts, predictedPts)
    dist3 = tf.sqrt(square_dist3 + 1e-12)  # take the square root
    dist4 = tf.transpose(dist, perm=[0, 2, 1])  # transpose so the min is taken w.r.t. the predicted points
    knndis3 = tf.nn.top_k(tf.negative(dist3), k=FLAGS.nnk)  # the k nearest neighbours per row
    knndis4 = tf.nn.top_k(tf.negative(dist4), k=FLAGS.nnk)
    densityLoss2 = tf.reduce_mean(tf.abs(knndis3.values - knndis4.values))

    densityLoss = densityLoss1 + densityLoss2

    data_loss = shapeLoss + densityLoss * FLAGS.densityWeight
    return data_loss, shapeLoss, densityLoss
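
The shapeLoss term above is a (non-squared) Chamfer distance: for every target point the distance to its nearest prediction, plus the reverse direction. A single-cloud NumPy sketch of just that term:

import numpy as np

def chamfer_shape_loss(target, pred):
    # target: (N, 3), pred: (M, 3) -> symmetric mean nearest-neighbour distance
    d = np.linalg.norm(target[:, None, :] - pred[None, :, :], axis=-1)   # (N, M)
    return d.min(axis=1).mean() + d.min(axis=0).mean()
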
Example #14
def get_uniform_loss(pcd,
                     percentages=[0.004, 0.006, 0.008, 0.010, 0.012],
                     radius=1.0):
    B, N, C = pcd.get_shape().as_list()
    npoint = int(N * 0.05)
    loss = []
    for p in percentages:
        nsample = int(N * p)
        r = math.sqrt(p * radius)
        disk_area = math.pi * (radius**2) * p / nsample
        #print(npoint,nsample)
        new_xyz = gather_point(pcd, farthest_point_sample(
            npoint, pcd))  # (batch_size, npoint, 3)
        idx, pts_cnt = query_ball_point(
            r, nsample, pcd, new_xyz)  #(batch_size, npoint, nsample)

        #expect_len =  tf.sqrt(2*disk_area/1.732)#using hexagon
        expect_len = tf.sqrt(disk_area)  # using square

        grouped_pcd = group_point(pcd, idx)
        grouped_pcd = tf.concat(tf.unstack(grouped_pcd, axis=1), axis=0)

        var, _ = knn_point(2, grouped_pcd, grouped_pcd)
        uniform_dis = -var[:, :, 1:]
        uniform_dis = tf.sqrt(tf.abs(uniform_dis + 1e-8))
        uniform_dis = tf.reduce_mean(uniform_dis, axis=[-1])
        uniform_dis = tf.square(uniform_dis - expect_len) / (expect_len + 1e-8)
        uniform_dis = tf.reshape(uniform_dis, [-1])

        mean, variance = tf.nn.moments(uniform_dis, axes=0)
        mean = mean * math.pow(p * 100, 2)
        #nothing 4
        loss.append(mean)
    return tf.add_n(loss) / len(percentages)
Example #15
def get_model(layer_pts, is_training, sconv_params, sdconv_params, fc_params, sampling='random', weight_decay=0.0, bn_decay=None, part_num=8):
    if sampling == 'fps':
        sys.path.append(os.path.join(BASE_DIR, 'tf_ops/sampling'))
        from tf_sampling import farthest_point_sample, gather_point

    layer_fts_list = [None]
    layer_pts_list = [layer_pts]
    for layer_idx, layer_param in enumerate(sconv_params):
        tag = 'sconv_' + str(layer_idx + 1) + '_'
        K = layer_param['K']
        D = layer_param['D']
        P = layer_param['P']
        C = layer_param['C']
        if P == -1:
            qrs = layer_pts
        else:
            if sampling == 'fps':
                qrs = gather_point(layer_pts, farthest_point_sample(P, layer_pts))
            elif sampling == 'random':
                qrs = tf.slice(layer_pts, (0, 0, 0), (-1, P, -1), name=tag + 'qrs')  # (N, P, 3)
            else:
                print('Unknown sampling method!')
                exit()

        layer_fts= shellconv(layer_pts_list[-1], layer_fts_list[-1], qrs, is_training, tag, K, D, P, C, True, bn_decay)

        layer_pts = qrs
        layer_pts_list.append(qrs)
        layer_fts_list.append(layer_fts)
  
    if sdconv_params is not None:
        fts = layer_fts_list[-1]
        for layer_idx, layer_param in enumerate(sdconv_params):
            tag = 'sdconv_' + str(layer_idx + 1) + '_'
            K = layer_param['K'] 
            D = layer_param['D'] 
            pts_layer_idx = layer_param['pts_layer_idx']  # 2 1 0 
            qrs_layer_idx = layer_param['qrs_layer_idx']  # 1 0 -1

            pts = layer_pts_list[pts_layer_idx + 1]
            qrs = layer_pts_list[qrs_layer_idx + 1]
            fts_qrs = layer_fts_list[qrs_layer_idx + 1]

            C = fts_qrs.get_shape()[-1].value if fts_qrs is not None else C//2
            P = qrs.get_shape()[1].value
            
            layer_fts= shellconv(pts, fts, qrs, is_training, tag, K, D, P, C, True, bn_decay)
            if fts_qrs is not None: # skipped for the last layer, where fts_qrs is None
                fts_concat = tf.concat([layer_fts, fts_qrs], axis=-1, name=tag + 'fts_concat')
                fts = dense(fts_concat, C, is_training, tag + 'mlp', bn_decay=bn_decay)           
            
    for layer_idx, layer_param in enumerate(fc_params):
        C = layer_param['C']
        dropout_rate = layer_param['dropout_rate']
        layer_fts = dense(layer_fts, C, is_training, name='fc{:d}'.format(layer_idx), bn_decay=bn_decay)
        layer_fts = tf.layers.dropout(layer_fts, rate=dropout_rate, name='fc{:d}_dropout'.format(layer_idx))
    
    logits = dense(layer_fts, part_num, is_training, name='logits', activation=None)
    return logits
def sample_and_group_layer1(npoint,
                            radius,
                            nsample,
                            xyz,
                            points,
                            knn=False,
                            use_xyz=True):
    '''
    Input:
        npoint: int32
        radius: float32
        nsample: int32
        xyz: (batch_size, ndataset, 3) TF tensor
        points: (batch_size, ndataset, channel) TF tensor, if None will just use xyz as points
        knn: bool, if True use kNN instead of radius search
        use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
    Output:
        new_xyz: (batch_size, npoint, 3) TF tensor
        new_points: (batch_size, npoint, nsample, 3+channel) TF tensor
        idx: (batch_size, npoint, nsample) TF tensor, indices of local points as in ndataset points
        grouped_xyz: (batch_size, npoint, nsample, 3) TF tensor, normalized point XYZs
            (subtracted by seed point XYZ) in local regions
    '''
    #tf_ops/samples/tf_sampling.py
    new_xyz = gather_point(xyz, farthest_point_sample(
        npoint, xyz))  # (batch_size, npoint, 3), select the npoint (here 512) seed points
    if knn:
        _, idx = knn_point(nsample, xyz, new_xyz)
    else:
        idx, pts_cnt = query_ball_point(radius, nsample, xyz,
                                        new_xyz)  # for each of the 512 seeds, the indices of its 32 in-ball neighbours
    grouped_xyz = group_point(
        xyz, idx)  # (batch_size, npoint, nsample, 3) # the 32 neighbour points of each of the 512 seeds
    grouped_xyz -= tf.tile(
        tf.expand_dims(new_xyz, 2), [1, 1, nsample, 1]
    )  # translation normalization: expand (bs,512,3)->(bs,512,1,3), then tile 32x along dim 2 ->(bs,512,32,3), assuming npoint=512 and nsample=32
    kernel = tf.Variable(tf.random_normal([32, 16, 3], stddev=0.1, seed=1),
                         name='kernel')
    tf.add_to_collection("kernel", kernel)
    # kernel = tf.convert_to_tensor(kernel)
    kc_points = kernel_correlation(
        grouped_xyz, kernel,
        0.005)  # KC module ==>(b,l,n)===>(BS, npoint, 1, l)
    kc_points = tf.transpose(kc_points, perm=[0, 2, 1])
    kc_points = tf.tile(tf.expand_dims(kc_points, 2), [1, 1, nsample, 1])
    if points is not None:
        grouped_points = group_point(
            points, idx)  # (batch_size, npoint, nsample, channel)
        if use_xyz:  # whether to keep the raw xyz coordinates alongside the point features
            new_points = tf.concat(
                [grouped_xyz, grouped_points],
                axis=-1)  # (batch_size, npoint, nsample, 3+channel)
        else:
            new_points = grouped_points
    else:
        new_points = grouped_xyz
    new_points = tf.concat([kc_points, new_points], axis=-1)

    return new_xyz, new_points, idx, grouped_xyz
def conv_dir_module(xyz, direction, points, npoint, radius, mlp, conv, mlp2, is_training, bn_decay, scope,
                    kernel_size=9, bn=True, use_xyz=True, use_nchw=False, center=False):
    with tf.variable_scope(scope) as sc:
        ids = farthest_point_sample(npoint, xyz)
        new_xyz = gather_point(xyz, ids)
        x_dir = gather_point(direction[:, :, 0:3], ids)
        y_dir = gather_point(direction[:, :, 3:6], ids)
        new_direction = tf.concat([x_dir, y_dir], axis=2)
        if kernel_size in [7, 13, 19]:
            weight, nbhd_idx = get_patch_rot(xyz, new_xyz, x_dir, y_dir, kernel_size, radius)
        else:
            weight, nbhd_idx = get_patch(xyz, new_xyz, x_dir, y_dir, kernel_size, radius)
        new_points = conv_module(xyz, new_xyz, points, weight, nbhd_idx, mlp=mlp, conv=conv, mlp2=mlp2,
                                 is_training=is_training, bn_decay=bn_decay, center=center,
                                 scope=scope, bn=bn, use_xyz=use_xyz, use_nchw=use_nchw, use_pooling=True)
        new_points = tf.nn.relu(new_points)
        return new_points, new_xyz, new_direction
Example #18
def farthest_point_sample_edge(npoint, xyz, edge_ids):
    # gather the edge points first (batch size 1, matching the original indexing);
    # this assumes the intent is to run FPS on the edge subset only
    edge_xyz = tf.expand_dims(tf.gather(xyz[0], edge_ids), axis=0)
    new_xyz = gather_point(edge_xyz, farthest_point_sample(npoint, edge_xyz))

    return new_xyz
Example #19
def symmetric_sample(points, num):
    p1_idx = farthest_point_sample(num, points)
    input_fps = gather_point(points, p1_idx)
    input_fps_flip = tf.concat(
        [tf.expand_dims(input_fps[:, :, 0], axis=2), tf.expand_dims(input_fps[:, :, 1], axis=2),
         tf.expand_dims(-input_fps[:, :, 2], axis=2)], axis=2)
    input_fps = tf.concat([input_fps, input_fps_flip], 1)
    return input_fps
def pointnet_sa_module_msg(xyz, points, npoint, radius_list, nsample_list, mlp_list, \
                is_training, bn_decay, scope, bn=True, use_xyz=True, use_nchw=False):
    ''' 
    new pointnet set abstraction (sa) module with multi-scale grouping (msg)
    '''
    data_format = 'NCHW' if use_nchw else 'NHWC'
    with tf.variable_scope(scope) as sc:
        input_points = xyz
        point_cloud_shape = points.get_shape()
        batch_size = point_cloud_shape[0].value
        # sampled_idx = tf.random_uniform(shape=(batch_size,npoint),maxval=npoint-1,dtype=tf.int32) 
        sampled_idx = farthest_point_sample(npoint, xyz)
        new_xyz = gather_point(xyz, sampled_idx)

        sampled_idx = tf.expand_dims(sampled_idx, -1)
        new_points_list = []
        for i in range(len(radius_list)):
            input_points = xyz
            if points is not None:
                if use_xyz:
                    input_points = tf.concat([input_points, points], axis=-1)
                else:
                    input_points = points
            else:
                input_points = xyz

            # fit for mlp
            input_points = tf.expand_dims(input_points, -2)
            print("[MSG-MLP]",input_points.shape, input_points.dtype)
            if use_nchw: input_points = tf.transpose(input_points, [0,3,1,2])
            for j,num_out_channel in enumerate(mlp_list[i]):
                input_points = tf_util.conv2d(input_points, num_out_channel, [1,1],
                                                padding='VALID', stride=[1,1], bn=bn, 
                                                is_training=is_training,
                                                scope='conv%d_%d'%(i,j), bn_decay=bn_decay)
            if use_nchw: input_points = tf.transpose(input_points, [0,2,3,1])

            radius = radius_list[i]
            nsample = nsample_list[i]
            idx, _ = query_ball_point(radius, nsample, xyz, new_xyz)
            
            # recover for grouping
            input_points = tf.squeeze(input_points, -2)
            sampled_points = new_group_point(input_points, sampled_idx)
            new_points = new_group_point(input_points, idx)

            # sampled_points = tf.squeeze(sampled_points, -2)
            # new_points -= sampled_points
            new_points = tf.reduce_max(new_points, axis=[2])
            new_points -= tf.squeeze(sampled_points, -2)
            # print(tf.shape(input_points), tf.shape(new_points))
            # sampled_points = gather_point(input_points, sampled_idx)
            new_points_list.append(new_points)

        new_points_concat = tf.concat(new_points_list, axis=-1)
        print("[MSG-MLP] output:",new_points_concat.shape)
        return new_xyz, new_points_concat
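
The ordering used in this module (per-point MLP first, grouping afterwards) relies on a per-point 1x1 transform commuting with neighbour gathering, which is what lets the expensive grouping be delayed until after the MLP. A self-contained NumPy check of that property (shapes are illustrative only):

import numpy as np

rng = np.random.default_rng(0)
points = rng.normal(size=(2, 16, 4))        # (B, N, C) per-point features
W = rng.normal(size=(4, 8))                 # a 1x1 "conv" = per-point linear map
idx = rng.integers(0, 16, size=(2, 5, 3))   # (B, npoint, nsample) neighbour indices

def group(x, idx):
    # the same flat offset-gather used for grouping
    B, N, C = x.shape
    return x.reshape(-1, C)[idx + (np.arange(B) * N).reshape(B, 1, 1)]

assert np.allclose(group(points @ W, idx), group(points, idx) @ W)  # MLP-then-group == group-then-MLP
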
Example #21
def pointnet_sa_module_msg(xyz,
                           points,
                           npoint,
                           radius_list,
                           nsample_list,
                           mlp_list,
                           scope,
                           use_xyz=True,
                           use_nchw=False):
    ''' PointNet Set Abstraction (SA) module with Multi-Scale Grouping (MSG)
        Input:
            xyz: (batch_size, ndataset, 3) TF tensor
            points: (batch_size, ndataset, channel) TF tensor
            npoint: int32 -- #points sampled in farthest point sampling
            radius: list of float32 -- search radius in local region
            nsample: list of int32 -- how many points in each local region
            mlp: list of list of int32 -- output size for MLP on each point
            use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
            use_nchw: bool, if True, use NCHW data format for conv2d, which is usually faster than NHWC format
        Return:
            new_xyz: (batch_size, npoint, 3) TF tensor
            new_points: (batch_size, npoint, \sum_k{mlp[k][-1]}) TF tensor
    '''
    with tf.variable_scope(scope) as sc:
        p1_idx = farthest_point_sample(npoint, xyz)
        new_xyz = gather_point(xyz, p1_idx)
        new_points_list = []
        for i in range(len(radius_list)):
            radius = radius_list[i]
            nsample = nsample_list[i]
            idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)
            grouped_xyz = group_point(xyz, idx)  #b*n*k*3
            grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2),
                                   [1, 1, nsample, 1])
            if points is not None:
                grouped_points = group_point(points, idx)
                if use_xyz:
                    grouped_points = tf.concat([grouped_points, grouped_xyz],
                                               axis=-1)
            else:
                grouped_points = grouped_xyz
            if use_nchw:
                grouped_points = tf.transpose(grouped_points, [0, 3, 1, 2])
            for j, num_out_channel in enumerate(mlp_list[i]):
                grouped_points = conv2d(grouped_points,
                                        num_out_channel, [1, 1],
                                        weight_decay=0,
                                        padding='VALID',
                                        stride=[1, 1],
                                        scope='conv%d_%d' % (i, j),
                                        activation_fn=tf.nn.leaky_relu)
            if use_nchw:
                grouped_points = tf.transpose(grouped_points, [0, 2, 3, 1])
            new_points = tf.reduce_max(grouped_points, axis=[2])  #b*n*c
            new_points_list.append(new_points)
        new_points_concat = tf.concat(new_points_list, axis=-1)
        return new_xyz, new_points_concat
Example #22
def get_tf_sess_pl(npoint, batch_size, num_gpu):

    pc_placeholder = tf.placeholder(tf.float32, shape=[batch_size, None, 3])
    feature_placeholder = tf.placeholder(tf.float32,
                                         shape=[batch_size, None, 4])

    device_batch_size = batch_size // num_gpu
    new_xyz_gpu = []
    new_feature_gpu = []

    for i in range(num_gpu):
        with tf.device('/gpu:%d' % (i)), tf.name_scope('gpu_%d' %
                                                       (i)) as scope:
            pc_batch = tf.slice(pc_placeholder, [i * device_batch_size, 0, 0],
                                [device_batch_size, -1, -1])
            feature_batch = tf.slice(feature_placeholder,
                                     [i * device_batch_size, 0, 0],
                                     [device_batch_size, -1, -1])

            sample_idx = farthest_point_sample(npoint, pc_batch)
            new_xyz = gather_point(pc_batch, sample_idx)
            new_feature_part_1 = gather_point(feature_batch[:, :, :3],
                                              sample_idx)
            new_feature_part_2 = gather_point(feature_batch[:, :, -3:],
                                              sample_idx)
            new_feature = tf.concat([
                new_feature_part_1,
                tf.expand_dims(new_feature_part_2[:, :, -1], axis=-1)
            ],
                                    axis=-1)

            new_xyz_gpu.append(new_xyz)
            new_feature_gpu.append(new_feature)

    new_xyz = tf.concat(new_xyz_gpu, 0)
    new_feature = tf.concat(new_feature_gpu, 0)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    config.log_device_placement = False
    sess = tf.Session(config=config)

    return sess, new_xyz, new_feature, pc_placeholder, feature_placeholder
Example #23
def sample_and_group(npoint,
                     radius,
                     nsample,
                     xyz,
                     points,
                     tnet_spec=None,
                     knn=False,
                     use_xyz=True):
    '''
    Input:
        npoint: int32
        radius: float32
        nsample: int32
        xyz: (batch_size, ndataset, 3) TF tensor
        points: (batch_size, ndataset, channel) TF tensor, if None will just use xyz as points
        tnet_spec: dict (keys: mlp, mlp2, is_training, bn_decay), if None do not apply tnet
        knn: bool, if True use kNN instead of radius search
        use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
    Output:
        new_xyz: (batch_size, npoint, 3) TF tensor
        new_points: (batch_size, npoint, nsample, 3+channel) TF tensor
        idx: (batch_size, npoint, nsample) TF tensor, indices of local points as in ndataset points
        grouped_xyz: (batch_size, npoint, nsample, 3) TF tensor, normalized point XYZs
            (subtracted by seed point XYZ) in local regions
    Workflow:
        Find the <npoint> down-sampled farthest points by <farthest_point_sample>
        For each down-sampled point, find <nsample> sub-group points by <query_ball_point>
    '''

    new_xyz = gather_point(
        xyz, farthest_point_sample(npoint, xyz)
    )  # (batch_size, npoint, 3)  the points sampled with farthest distance
    if knn:
        _, idx = knn_point(nsample, xyz, new_xyz)
    else:
        # [batch_size,npoint,nsample] [batch_size,npoint]
        idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)
    grouped_xyz = group_point(xyz, idx)  # (batch_size, npoint, nsample, 3)
    grouped_xyz -= tf.tile(tf.expand_dims(
        new_xyz, 2), [1, 1, nsample, 1
                      ])  # translation normalization: minus the center point
    if tnet_spec is not None:
        grouped_xyz = tnet(grouped_xyz, tnet_spec)
    if points is not None:
        grouped_points = group_point(
            points, idx)  # (batch_size, npoint, nsample, channel)
        if use_xyz:
            new_points = tf.concat(
                [grouped_xyz, grouped_points],
                axis=-1)  # (batch_size, npoint, nsample, 3+channel)
        else:
            new_points = grouped_points
    else:
        new_points = grouped_xyz

    return new_xyz, new_points, idx, grouped_xyz
Example #24
def get_loss(reconstructed_points, original_points, type='chamfer', w=0, mu=0, sigma=0,fv=0, add_fv_loss=False):

    n_points = reconstructed_points.get_shape()[1].value
    d = tf.constant(0)
    matched_out = tf.constant(0)

    if type == 'chamfer':
        #Chamfer Distance
        s1_s2 = tf.reduce_sum(tf.reduce_min(pairwise_diff(reconstructed_points, original_points), axis=2), axis=1)
        s2_s1 = tf.reduce_sum(tf.reduce_min(pairwise_diff(original_points, reconstructed_points), axis=2), axis=1)
        loss = (s1_s2 + s2_s1)/ n_points
    elif type == 'emd':
        matchl_out, matchr_out = tf_auctionmatch.auction_match(original_points, reconstructed_points)
        matched_out = tf_sampling.gather_point(reconstructed_points, matchl_out)
        d = tf.sqrt(tf.reduce_sum(tf.square(original_points - matched_out), axis=2))
        loss = tf.reduce_sum(d,axis=1)/ n_points
    elif type=='joint':
        #Use both loss functions
        s1_s2 = tf.reduce_sum(tf.reduce_min(pairwise_diff(reconstructed_points, original_points), axis=2), axis=1)
        s2_s1 = tf.reduce_sum(tf.reduce_min(pairwise_diff(original_points, reconstructed_points), axis=2), axis=1)
        loss_chamfer = s1_s2 + s2_s1

        matchl_out, matchr_out = tf_auctionmatch.auction_match(original_points, reconstructed_points)
        matched_out = tf_sampling.gather_point(reconstructed_points, matchl_out)
        d = tf.sqrt(tf.reduce_sum(tf.square(original_points - matched_out), axis=2))
        loss_emd = tf.reduce_sum(d,axis=1)
        loss = (loss_chamfer + loss_emd) / n_points

    loss = tf.reduce_mean(loss)
    tf.summary.scalar('emd_loss', loss)

    if add_fv_loss:
        fv_rec = tf_util.get_fv_tf_no_mvn(reconstructed_points, w, mu, sigma, flatten=False)
        fv_loss = tf.nn.l2_loss(fv_rec-fv)
        tf.summary.scalar('fv_loss', fv_loss)
        loss = loss + 0.001*fv_loss

    # tf.summary.scalar('weight_decay_loss', weight_decay_loss)
    return loss, d, matched_out
def get_emd_loss(pred, pc, radius):
    """ pred: BxNxC,
        label: BxN, """
    batch_size = pred.get_shape()[0].value
    matchl_out, matchr_out = tf_auctionmatch.auction_match(pred, pc)
    matched_out = tf_sampling.gather_point(pc, matchl_out)
    dist = tf.reshape((pred - matched_out)**2, shape=(batch_size, -1))
    dist = tf.reduce_mean(dist, axis=1, keep_dims=True)
    dist_norm = dist / radius
    emd_loss = tf.reduce_mean(dist_norm)
    return emd_loss
Example #26
def pointnet_sa_module_msg(xyz,
                           points,
                           npoint,
                           radius_list,
                           nsample_list,
                           mlp_list,
                           is_training,
                           bn_decay,
                           scope,
                           bn=True,
                           use_xyz=True):
    ''' PointNet Set Abstraction (SA) module with Multi-Scale Grouping (MSG)
        Input:
            xyz: (batch_size, ndataset, 3) TF tensor
            points: (batch_size, ndataset, channel) TF tensor
            npoint: int32 -- #points sampled in farthest point sampling
            radius: list of float32 -- search radius in local region
            nsample: list of int32 -- how many points in each local region
            mlp: list of list of int32 -- output size for MLP on each point
            use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
        Return:
            new_xyz: (batch_size, npoint, 3) TF tensor
            new_points: (batch_size, npoint, \sum_k{mlp[k][-1]}) TF tensor
    '''
    with tf.variable_scope(scope) as sc:
        new_xyz = gather_point(xyz, farthest_point_sample(npoint, xyz))
        new_points_list = []
        for i in range(len(radius_list)):
            radius = radius_list[i]
            nsample = nsample_list[i]
            idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)
            grouped_xyz = group_point(xyz, idx)
            grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2),
                                   [1, 1, nsample, 1])
            if points is not None:
                grouped_points = group_point(points, idx)
                if use_xyz:
                    grouped_points = tf.concat([grouped_points, grouped_xyz],
                                               axis=-1)
            else:
                grouped_points = grouped_xyz
            for j, num_out_channel in enumerate(mlp_list[i]):
                grouped_points = tf_util.conv2d(grouped_points,
                                                num_out_channel, [1, 1],
                                                padding='VALID',
                                                stride=[1, 1],
                                                bn=bn,
                                                is_training=is_training,
                                                scope='conv%d_%d' % (i, j),
                                                bn_decay=bn_decay)
            new_points = tf.reduce_max(grouped_points, axis=[2])
            new_points_list.append(new_points)
        new_points_concat = tf.concat(new_points_list, axis=-1)
        return new_xyz, new_points_concat
Example #27
def sampling(npoint, pts):
    '''
    inputs:
    npoint: scalar, number of points to sample
    pts: B * N * 3, input point cloud
    output:
    sub_pts: B * npoint * 3, sub-sampled point cloud
    '''

    sub_pts = tf_sampling.gather_point(pts, tf_sampling.farthest_point_sample(npoint, pts))
    return sub_pts
Example #28
def pointconv_sampling(npoint, pts):
    """
    inputs:
    npoint: scalar, number of points to sample
    pts: B * N * 3, input point cloud
    output:
    sub_pts: B * npoint * 3, sub-sampled point cloud
    """

    sub_pts = gather_point(pts, farthest_point_sample(npoint, pts))
    return sub_pts
Example #29
def pointnet_sa_module_msg_rand_tree_triples(xyz, points, npoint, radius_list, nsample_list, mlp_list, is_training, bn_decay, scope, nshuffles=1, bn=True, use_xyz=True, use_nchw=False):
    ''' PointNet Set Abstraction (SA) module with Multi-Scale Grouping (MSG)
        Input:
            xyz: (batch_size, ndataset, 3) TF tensor
            points: (batch_size, ndataset, channel) TF tensor
            npoint: int32 -- #points sampled in farthest point sampling
            radius: list of float32 -- search radius in local region
            nsample: list of int32 -- how many points in each local region
            mlp: list of list of int32 -- output size for MLP on each point
            use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
            use_nchw: bool, if True, use NCHW data format for conv2d, which is usually faster than NHWC format
        Return:
            new_xyz: (batch_size, npoint, 3) TF tensor
            new_points: (batch_size, npoint, \sum_k{mlp[k][-1]}) TF tensor
    '''
    data_format = 'NCHW' if use_nchw else 'NHWC'
    with tf.variable_scope(scope) as sc:
        new_xyz = gather_point(xyz, farthest_point_sample(npoint, xyz))
        new_points_list = []
        for i in range(len(radius_list)):
            radius = radius_list[i]
            nsample = nsample_list[i]
            idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)
            grouped_xyz = group_point(xyz, idx)
            grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2), [1,1,nsample,1])
            if points is not None:
                grouped_points = group_point(points, idx)
                if use_xyz:
                    grouped_points = tf.concat([grouped_points, grouped_xyz], axis=-1)
            else:
                grouped_points = grouped_xyz

            for j,num_out_channel in enumerate(mlp_list[i]):
                grouped_points_tr = tf.transpose(grouped_points, [2,0,1,3]) #bringing the points to the first axis
                point_idxs = np.arange(grouped_points_tr.get_shape()[0].value)
                point_idxs = np.resize(point_idxs, (grouped_points_tr.get_shape()[0].value * nshuffles,))
                point_idxs = tf.random.shuffle(point_idxs)
                grouped_points_tr = tf.gather(grouped_points_tr, point_idxs)
                grouped_points = tf.transpose(grouped_points_tr, [1,2,0,3])

                if use_nchw: grouped_points = tf.transpose(grouped_points, [0,3,1,2])

                grouped_points = tf_util.conv2d(grouped_points, num_out_channel, [1,3],
                                                padding='VALID', stride=[1,3], bn=bn, is_training=is_training,
                                                scope='conv%d_%d'%(i,j), bn_decay=bn_decay)

            if use_nchw: grouped_points = tf.transpose(grouped_points, [0,2,3,1])
            
            new_points = tf.squeeze(grouped_points, [2])
            new_points_list.append(new_points)
        new_points_concat = tf.concat(new_points_list, axis=-1)
        return new_xyz, new_points_concat
Example #30
def pointnet_sa_module_msg_bkup(xyz, points, npoint, radius_list, nsample_list,\
                                mlp_list, is_training, bn_decay, scope, bn=True, \
                                use_xyz=True, use_nchw=False):
    ''' pointnet set abstraction (sa) module with multi-scale grouping (msg)
        input:
            xyz: (batch_size, ndataset, 3) tf tensor
            points: (batch_size, ndataset, channel) tf tensor
            npoint: int32 -- #points sampled in farthest point sampling
            radius: list of float32 -- search radius in local region
            nsample: list of int32 -- how many points in each local region
            mlp: list of list of int32 -- output size for mlp on each point
            use_xyz: bool, if true concat xyz with local point features, otherwise just use point features
            use_nchw: bool, if true, use nchw data format for conv2d, which is usually faster than nhwc format
        return:
            new_xyz: (batch_size, npoint, 3) tf tensor
            new_points: (batch_size, npoint, \sum_k{mlp[k][-1]}) tf tensor
    '''
    data_format = 'NCHW' if use_nchw else 'NHWC'
    with tf.variable_scope(scope) as sc:
        new_xyz = gather_point(xyz, farthest_point_sample(npoint, xyz))
        new_points_list = []
        for i in range(len(radius_list)):
            radius = radius_list[i]
            nsample = nsample_list[i]
            idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)
            grouped_xyz = group_point(xyz, idx)
            grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2),
                                   [1, 1, nsample, 1])
            if points is not None:
                grouped_points = group_point(points, idx)
                if use_xyz:
                    grouped_points = tf.concat([grouped_points, grouped_xyz],
                                               axis=-1)
            else:
                grouped_points = grouped_xyz
            if use_nchw:
                grouped_points = tf.transpose(grouped_points, [0, 3, 1, 2])
            for j, num_out_channel in enumerate(mlp_list[i]):
                grouped_points = tf_util.conv2d(grouped_points,
                                                num_out_channel, [1, 1],
                                                padding='VALID',
                                                stride=[1, 1],
                                                bn=bn,
                                                is_training=is_training,
                                                scope='conv%d_%d' % (i, j),
                                                bn_decay=bn_decay)
            if use_nchw:
                grouped_points = tf.transpose(grouped_points, [0, 2, 3, 1])
            new_points = tf.reduce_max(grouped_points, axis=[2])
            new_points_list.append(new_points)
        new_points_concat = tf.concat(new_points_list, axis=-1)
        return new_xyz, new_points_concat
Example #31
def sample_and_group(npoint,
                     radius,
                     nsample,
                     xyz,
                     points,
                     knn=False,
                     use_xyz=True):
    '''
    Input:
        npoint: int32
        radius: float32
        nsample: int32
        xyz: (batch_size, ndataset, 3) TF tensor
        points: (batch_size, ndataset, channel) TF tensor, if None will just use xyz as points
        knn: bool, if True use kNN instead of radius search
        use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
    Output:
        new_xyz: (batch_size, npoint, 3) TF tensor
        new_points: (batch_size, npoint, nsample, 3+channel) TF tensor
        idx: (batch_size, npoint, nsample) TF tensor, indices of local points as in ndataset points
        grouped_xyz: (batch_size, npoint, nsample, 3) TF tensor, normalized point XYZs
            (subtracted by seed point XYZ) in local regions
        sampled_idx: () TF tensor, idx for sampled points
    '''

    point_cloud_shape = points.get_shape()
    batch_size = point_cloud_shape[0].value
    sampled_idx = farthest_point_sample(npoint, xyz)
    # sampled_idx = tf.random_uniform(shape=(batch_size,npoint),maxval=npoint-1,dtype=tf.int32)

    new_xyz = gather_point(xyz, sampled_idx)  # (batch_size, npoint, 3)
    if knn:
        _, idx = knn_point(nsample, xyz, new_xyz)
    else:
        idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)
    grouped_xyz = group_point(xyz, idx)  # (batch_size, npoint, nsample, 3)
    grouped_xyz -= tf.expand_dims(new_xyz, 2)  # translation normalization
    if points is not None:
        # grouped_points = group_point(points, idx) # (batch_size, npoint, nsample, channel)
        # print("grouped_points:", grouped_points.shape)
        # grouping:
        grouped_points = new_group_point(
            points, idx)  # (batch_size, npoint, nsample, channel)

        print("grouped_points:", grouped_points.shape)
        new_points = grouped_points
    else:
        new_points = grouped_xyz

    print("[Group] points:", new_points.shape)
    return new_xyz, new_points, idx, grouped_xyz
def pointnet_sa_module_msg(xyz, points, npoint, radius_list, nsample_list, mlp_list, is_training, bn_decay, scope, bn=True, use_xyz=True, use_nchw=False):
    ''' PointNet Set Abstraction (SA) module with Multi-Scale Grouping (MSG)
        Input:
            xyz: (batch_size, ndataset, 3) TF tensor
            points: (batch_size, ndataset, channel) TF tensor
            npoint: int32 -- #points sampled in farthest point sampling
            radius: list of float32 -- search radius in local region
            nsample: list of int32 -- how many points in each local region
            mlp: list of list of int32 -- output size for MLP on each point
            use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
            use_nchw: bool, if True, use NCHW data format for conv2d, which is usually faster than NHWC format
        Return:
            new_xyz: (batch_size, npoint, 3) TF tensor
            new_points: (batch_size, npoint, \sum_k{mlp[k][-1]}) TF tensor
    '''
    data_format = 'NCHW' if use_nchw else 'NHWC'
    with tf.variable_scope(scope) as sc:
        new_xyz = gather_point(xyz, farthest_point_sample(npoint, xyz))
        new_points_list = []
        for i in range(len(radius_list)):
            radius = radius_list[i]
            nsample = nsample_list[i]
            idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)
            grouped_xyz = group_point(xyz, idx)
            grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2), [1,1,nsample,1])
            if points is not None:
                grouped_points = group_point(points, idx)
                if use_xyz:
                    grouped_points = tf.concat([grouped_points, grouped_xyz], axis=-1)
            else:
                grouped_points = grouped_xyz
            if use_nchw: grouped_points = tf.transpose(grouped_points, [0,3,1,2])
            for j,num_out_channel in enumerate(mlp_list[i]):
                grouped_points = tf_util.conv2d(grouped_points, num_out_channel, [1,1],
                                                padding='VALID', stride=[1,1], bn=bn, is_training=is_training,
                                                scope='conv%d_%d'%(i,j), bn_decay=bn_decay)
            if use_nchw: grouped_points = tf.transpose(grouped_points, [0,2,3,1])
            new_points = tf.reduce_max(grouped_points, axis=[2])
            new_points_list.append(new_points)
        new_points_concat = tf.concat(new_points_list, axis=-1)
        return new_xyz, new_points_concat