def sample_and_group(npoint, radius, nsample, xyz, points, knn=False, use_xyz=True):
    '''
    Input:
        npoint: int32
        radius: float32
        nsample: int32
        xyz: (batch_size, ndataset, 3) TF tensor
        points: (batch_size, ndataset, channel) TF tensor, if None will just use xyz as points
        knn: bool, if True use kNN instead of radius search
        use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
    Output:
        new_xyz: (batch_size, npoint, 3) TF tensor
        new_points: (batch_size, npoint, nsample, 3+channel) TF tensor
        idx: (batch_size, npoint, nsample) TF tensor, indices of local points as in ndataset points
        grouped_xyz: (batch_size, npoint, nsample, 3) TF tensor, normalized point XYZs
            (subtracted by seed point XYZ) in local regions
    '''
    new_xyz = gather_point(xyz, farthest_point_sample(npoint, xyz))  # (batch_size, npoint, 3)
    if knn:
        _, idx = knn_point(nsample, xyz, new_xyz)
    else:
        idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)
    grouped_xyz = group_point(xyz, idx)  # (batch_size, npoint, nsample, 3)
    grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2), [1, 1, nsample, 1])  # translation normalization
    if points is not None:
        grouped_points = group_point(points, idx)  # (batch_size, npoint, nsample, channel)
        if use_xyz:
            new_points = tf.concat([grouped_xyz, grouped_points], axis=-1)  # (batch_size, npoint, nsample, 3+channel)
        else:
            new_points = grouped_points
    else:
        new_points = grouped_xyz
    return new_xyz, new_points, idx, grouped_xyz
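# Minimal usage sketch for sample_and_group, assuming a TF1-style graph session
# (as in the tests further below) and that the custom CUDA ops
# (farthest_point_sample, gather_point, query_ball_point, group_point, knn_point)
# are compiled and importable. The shapes and hyperparameters are illustrative only.
import numpy as np
import tensorflow as tf

demo_xyz = tf.constant(np.random.rand(4, 1024, 3).astype('float32'))      # toy batch of 4 clouds
demo_points = tf.constant(np.random.rand(4, 1024, 16).astype('float32'))  # 16-dim per-point features
demo_new_xyz, demo_new_points, demo_idx, _ = sample_and_group(
    npoint=256, radius=0.2, nsample=32, xyz=demo_xyz, points=demo_points)
# demo_new_xyz: (4, 256, 3); demo_new_points: (4, 256, 32, 19); demo_idx: (4, 256, 32)
with tf.compat.v1.Session() as sess:
    print(sess.run(tf.shape(demo_new_points)))  # expect [4 256 32 19]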
def radius_search_and_group(centroids_xyz, radius, num_neighbors, points_xyz, points_features):
    """
    Perform radius search and grouping of points_xyz around each centroids_xyz
    Args:
        centroids_xyz: tf.tensor, xyz locations of centroids
        radius: float, radius of spherical region around centroid
        num_neighbors: int, number of neighbors to include in grouping per centroid
        points_xyz: tf.tensor, xyz locations of points
        points_features: tf.tensor, features of points
    Returns:
        tf.tensor, grouped points and point features
    """
    # Radius search around each centroid, returning num_neighbors point indices within radius of centroid
    point_indices, _ = tf_grouping.query_ball_point(radius, num_neighbors, points_xyz, centroids_xyz)

    # Group neighboring points (and corresponding point features) together
    grouped_points_xyz = tf_grouping.group_point(points_xyz, point_indices)  # (batch_size, num_centroids, num_neighbors, 3)
    grouped_points_features = tf_grouping.group_point(points_features, point_indices)  # (batch_size, num_centroids, num_neighbors, num_features)

    # Normalize points' xyz locations in local region by subtracting the xyz of the centroid of that region
    grouped_points_xyz -= tf.tile(tf.expand_dims(centroids_xyz, 2), [1, 1, num_neighbors, 1])

    grouped_points_xyz_and_features = tf.concat([grouped_points_xyz, grouped_points_features],
                                                axis=-1)  # (batch_size, num_centroids, num_neighbors, 3+num_features)
    return grouped_points_xyz_and_features
def sample_and_group_ByKeypoints(keypoints_xyz, nsample, xyz, sn, is_training):
    '''
    Input:
        keypoints_xyz: BxMx3
        nsample: int32
        xyz: BxNx3
        sn: BxNx3 per-point vectors (e.g. surface normals), or None
    Return:
        grouped_augmented: BxMxnsamplex3 (BxMxnsamplex6 when sn is given)
        idx: BxMxnsample int
        grouped_xyz: BxMxnsamplex3
        grouped_xyz_center: BxMx3
    '''
    _, idx = knn_point(nsample, xyz, keypoints_xyz)
    grouped_xyz = group_point(xyz, idx)  # BxMxnsamplex3
    grouped_xyz_center = tf.reduce_mean(grouped_xyz, axis=2)  # BxMx3
    grouped_xyz_decentered = grouped_xyz - tf.tile(
        tf.expand_dims(grouped_xyz_center, 2), [1, 1, nsample, 1])
    if sn is not None:
        grouped_sn = group_point(sn, idx)  # BxMxnsamplex3
        grouped_augmented = tf.concat([grouped_xyz_decentered, grouped_sn], -1)  # BxMxnsamplex6
    else:
        grouped_augmented = grouped_xyz_decentered  # BxMxnsamplex3
    return grouped_augmented, idx, grouped_xyz, grouped_xyz_center
def test(self):
    knn = True
    np.random.seed(100)
    pts = np.random.random((32, 512, 64)).astype("float32")
    tmp1 = np.random.random((32, 512, 3)).astype("float32")
    tmp2 = np.random.random((32, 128, 3)).astype("float32")
    with tf.device("/gpu:0"):
        points = tf.constant(pts)
        xyz1 = tf.constant(tmp1)
        xyz2 = tf.constant(tmp2)
        radius = 0.1
        nsample = 64
        if knn:
            _, idx = knn_point(nsample, xyz1, xyz2)
            grouped_points = group_point(points, idx)
        else:
            idx, _ = query_ball_point(radius, nsample, xyz1, xyz2)
            grouped_points = group_point(points, idx)
        # grouped_points_grad = tf.ones_like(grouped_points)
        # points_grad = tf.gradients(grouped_points, points, grouped_points_grad)
    with tf.compat.v1.Session("") as sess:
        now = time.time()
        for _ in range(100):
            ret = sess.run(grouped_points)
        print(time.time() - now)
        print(ret.shape, ret.dtype)
        print(ret)
def call(self, x):
    '''
    Input: List
        xyz: (batch, n_inputs, 3)
        features: (batch, n_inputs, channels)
    Output: List
        new_xyz: (batch_size, n_centroids, 3) TF tensor
        new_points: (batch_size, n_centroids, n_samples, 3+channel) TF tensor
        centroid_idx: (batch_size, n_centroids) TF tensor, indices of centroids
        grouped_xyz: (batch_size, n_centroids, n_samples, 3) TF tensor, normalized point XYZs
    '''
    xyz, features = x
    if self.random:
        centroid_idx = random_sample(self.n_centroid, xyz)
    else:
        centroid_idx = farthest_point_sample(self.n_centroid, xyz)
    new_xyz = gather_point(xyz, centroid_idx)  # (batch, n_centroid, 3)
    idx, _ = query_ball_point(self.radius, self.n_samples, xyz, new_xyz)
    grouped_xyz = group_point(xyz, idx)  # (batch_size, n_centroids, n_samples, 3)
    grouped_xyz -= tf.tile(
        tf.expand_dims(new_xyz, 2),
        [1, 1, self.n_samples, 1])  # translation normalization
    grouped_xyz /= self.radius  # normalize xyz w.r.t. the radius
    if self.use_feature:  # can't use None type here
        grouped_points = group_point(features, idx)  # (batch_size, n_centroid, n_samples, channels)
        if self.use_xyz:
            new_points = tf.concat([grouped_xyz, grouped_points],
                                   axis=-1)  # (batch_size, n_centroid, n_samples, channels + 3)
        else:
            new_points = grouped_points
    else:
        new_points = grouped_xyz
    return [new_xyz, new_points, centroid_idx, grouped_xyz]
def sample_and_group(sample_pt_num, radius, neighbor_size, input_xyz, input_features):
    '''
    Input:
        sample_pt_num: how many points to keep
        radius: query ball radius
        neighbor_size: how many neighbor points
        input_xyz: (batch_size, npoint, 3)
        input_features: (batch_size, npoint, C)
    Output:
        sampled_xyz: (batch_size, sample_pt_num, 3)
        idx: (batch_size, sample_pt_num, neighbor_size)
        sampled_grouped_relation: (batch_size, sample_pt_num, neighbor_size, 10)
        sampled_grouped_features: (batch_size, sample_pt_num, neighbor_size, C)
    '''
    sampled_xyz = gather_point(input_xyz, farthest_point_sample(sample_pt_num, input_xyz))  # (batch_size, sample_pt_num, 3)
    idx, pts_cnt = query_ball_point(radius, neighbor_size, input_xyz, sampled_xyz)
    sampled_grouped_xyz = group_point(input_xyz, idx)  # (batch_size, sample_pt_num, neighbor_size, 3)
    sampled_grouped_features = group_point(input_features, idx)
    sampled_center_xyz = tf.tile(tf.expand_dims(sampled_xyz, 2),
                                 [1, 1, neighbor_size, 1])  # (batch_size, sample_pt_num, neighbor_size, 3)
    euclidean = tf.reduce_sum(tf.square(sampled_grouped_xyz - sampled_center_xyz),
                              axis=-1, keepdims=True)  # (batch_size, sample_pt_num, neighbor_size, 1)
    # 10-D relation vector: squared distance (1) + offset (3) + center xyz (3) + neighbor xyz (3)
    sampled_grouped_relation = tf.concat(
        [euclidean, sampled_center_xyz - sampled_grouped_xyz, sampled_center_xyz, sampled_grouped_xyz],
        axis=-1)  # (batch_size, sample_pt_num, neighbor_size, 10)
    return sampled_xyz, idx, sampled_grouped_relation, sampled_grouped_features
def flow_embedding_module(xyz1, xyz2, feat1, feat2, radius, nsample, mlp,
                          is_training, bn_decay, scope, bn=True, pooling='max',
                          knn=True, corr_func='elementwise_product'):
    """
    Input:
        xyz1: (batch_size, npoint, 3)
        xyz2: (batch_size, npoint, 3)
        feat1: (batch_size, npoint, channel)
        feat2: (batch_size, npoint, channel)
    Output:
        xyz1: (batch_size, npoint, 3)
        feat1_new: (batch_size, npoint, mlp[-1])
    """
    if knn:
        _, idx = knn_point(nsample, xyz2, xyz1)
    else:
        idx, cnt = query_ball_point(radius, nsample, xyz2, xyz1)
        _, idx_knn = knn_point(nsample, xyz2, xyz1)
        cnt = tf.tile(tf.expand_dims(cnt, -1), [1, 1, nsample])
        # fall back to kNN indices wherever the ball query found fewer than nsample points
        idx = tf.where(cnt > (nsample - 1), idx, idx_knn)
    xyz2_grouped = group_point(xyz2, idx)  # batch_size, npoint, nsample, 3
    xyz1_expanded = tf.expand_dims(xyz1, 2)  # batch_size, npoint, 1, 3
    xyz_diff = xyz2_grouped - xyz1_expanded  # batch_size, npoint, nsample, 3

    feat2_grouped = group_point(feat2, idx)  # batch_size, npoint, nsample, channel
    feat1_expanded = tf.expand_dims(feat1, 2)  # batch_size, npoint, 1, channel
    # TODO: change distance function
    if corr_func == 'elementwise_product':
        feat_diff = feat2_grouped * feat1_expanded  # batch_size, npoint, nsample, channel
    elif corr_func == 'concat':
        feat_diff = tf.concat(axis=-1, values=[feat2_grouped,
                                               tf.tile(feat1_expanded, [1, 1, nsample, 1])])  # batch_size, npoint, nsample, channel*2
    elif corr_func == 'dot_product':
        feat_diff = tf.reduce_sum(feat2_grouped * feat1_expanded, axis=[-1], keep_dims=True)  # batch_size, npoint, nsample, 1
    elif corr_func == 'cosine_dist':
        feat2_grouped = tf.nn.l2_normalize(feat2_grouped, -1)
        feat1_expanded = tf.nn.l2_normalize(feat1_expanded, -1)
        feat_diff = tf.reduce_sum(feat2_grouped * feat1_expanded, axis=[-1], keep_dims=True)  # batch_size, npoint, nsample, 1
    elif corr_func == 'flownet_like':  # assuming square patch size k = 0 as in the FlowNet paper
        batch_size = xyz1.get_shape()[0].value
        npoint = xyz1.get_shape()[1].value
        feat_diff = tf.reduce_sum(feat2_grouped * feat1_expanded, axis=[-1], keep_dims=True)  # batch_size, npoint, nsample, 1
        total_diff = tf.concat(axis=-1, values=[xyz_diff, feat_diff])  # batch_size, npoint, nsample, 4
        feat1_new = tf.reshape(total_diff, [batch_size, npoint, -1])  # batch_size, npoint, nsample*4
        # feat1_new = tf.concat(axis=[-1], values=[feat1_new, feat1])  # batch_size, npoint, nsample*4+channel
        return xyz1, feat1_new

    feat1_new = tf.concat([feat_diff, xyz_diff], axis=3)  # batch_size, npoint, nsample, [channel or 1] + 3
    # TODO: move scope to outer indent
    with tf.variable_scope(scope) as sc:
        for i, num_out_channel in enumerate(mlp):
            feat1_new = tf_util.conv2d(feat1_new, num_out_channel, [1, 1],
                                       padding='VALID', stride=[1, 1],
                                       bn=True, is_training=is_training,
                                       scope='conv_diff_%d' % (i), bn_decay=bn_decay)
    if pooling == 'max':
        feat1_new = tf.reduce_max(feat1_new, axis=[2], keep_dims=False, name='maxpool_diff')
    elif pooling == 'avg':
        feat1_new = tf.reduce_mean(feat1_new, axis=[2], keep_dims=False, name='avgpool_diff')
    return xyz1, feat1_new
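# Minimal usage sketch for flow_embedding_module, assuming a TF1 graph session,
# that tf_util.conv2d and the grouping ops are available, and that the shapes,
# scope name, and hyperparameters below are illustrative assumptions.
frame1_xyz = tf.constant(np.random.rand(8, 1024, 3).astype('float32'))
frame2_xyz = tf.constant(np.random.rand(8, 1024, 3).astype('float32'))
frame1_feat = tf.constant(np.random.rand(8, 1024, 64).astype('float32'))
frame2_feat = tf.constant(np.random.rand(8, 1024, 64).astype('float32'))
# Mix each frame-1 point with its 64 nearest frame-2 neighbors into a
# motion-aware feature of width mlp[-1] = 128: output is (8, 1024, 128).
_, flow_feat = flow_embedding_module(
    frame1_xyz, frame2_xyz, frame1_feat, frame2_feat,
    radius=1.0, nsample=64, mlp=[128, 128, 128],
    is_training=tf.constant(True), bn_decay=None,
    scope='flow_embedding', knn=True, corr_func='concat')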
def sample_and_group_layer1(npoint, radius, nsample, xyz, points, knn=False, use_xyz=True):
    '''
    Input:
        npoint: int32
        radius: float32
        nsample: int32
        xyz: (batch_size, ndataset, 3) TF tensor
        points: (batch_size, ndataset, channel) TF tensor, if None will just use xyz as points
        knn: bool, if True use kNN instead of radius search
        use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
    Output:
        new_xyz: (batch_size, npoint, 3) TF tensor
        new_points: (batch_size, npoint, nsample, 3+channel) TF tensor
        idx: (batch_size, npoint, nsample) TF tensor, indices of local points as in ndataset points
        grouped_xyz: (batch_size, npoint, nsample, 3) TF tensor, normalized point XYZs
            (subtracted by seed point XYZ) in local regions
    '''
    # tf_ops/samples/tf_sampling.py
    new_xyz = gather_point(xyz, farthest_point_sample(npoint, xyz))  # (batch_size, npoint, 3), pick the npoint (e.g. 512) sampled seeds
    if knn:
        _, idx = knn_point(nsample, xyz, new_xyz)
    else:
        idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)  # indices assigning each seed a cluster of nsample (e.g. 32) neighbors
    grouped_xyz = group_point(xyz, idx)  # (batch_size, npoint, nsample, 3), gather the nsample neighbors of each seed
    grouped_xyz -= tf.tile(
        tf.expand_dims(new_xyz, 2), [1, 1, nsample, 1]
    )  # translation normalization: expand (bs, npoint, 3) -> (bs, npoint, 1, 3), tile to (bs, npoint, nsample, 3), then subtract
    kernel = tf.Variable(tf.random_normal([32, 16, 3], stddev=0.1, seed=1), name='kernel')
    tf.add_to_collection("kernel", kernel)
    # kernel = tf.convert_to_tensor(kernel)
    kc_points = kernel_correlation(grouped_xyz, kernel, 0.005)  # KC module ==> (b, l, n) ==> (BS, npoint, 1, l)
    kc_points = tf.transpose(kc_points, perm=[0, 2, 1])
    kc_points = tf.tile(tf.expand_dims(kc_points, 2), [1, 1, nsample, 1])
    if points is not None:
        grouped_points = group_point(points, idx)  # (batch_size, npoint, nsample, channel)
        if use_xyz:  # whether to keep the raw xyz spatial information
            new_points = tf.concat([grouped_xyz, grouped_points], axis=-1)  # (batch_size, npoint, nsample, 3+channel)
        else:
            new_points = grouped_points
    else:
        new_points = grouped_xyz
    new_points = tf.concat([kc_points, new_points], axis=-1)
    return new_xyz, new_points, idx, grouped_xyz
def set_upconv_module(xyz1, xyz2, feat1, feat2, nsample, mlp, mlp2, is_training,
                      scope, bn_decay=None, bn=True, pooling='max', radius=None, knn=True):
    """ Feature propagation from xyz2 (fewer points) to xyz1 (more points)
    Inputs:
        xyz1: (batch_size, npoint1, 3)
        xyz2: (batch_size, npoint2, 3)
        feat1: (batch_size, npoint1, channel1) features for xyz1 points (earlier layers)
        feat2: (batch_size, npoint2, channel2) features for xyz2 points
    Output:
        feat1_new: (batch_size, npoint1, mlp2[-1] or mlp[-1]+channel1)
    TODO: Add support for skip links. Study how delta(XYZ) plays a role in feature updating.
    """
    with tf.variable_scope(scope) as sc:
        if knn:
            l2_dist, idx = knn_point(nsample, xyz2, xyz1)
        else:
            idx, pts_cnt = query_ball_point(radius, nsample, xyz2, xyz1)
        xyz2_grouped = group_point(xyz2, idx)  # batch_size, npoint1, nsample, 3
        xyz1_expanded = tf.expand_dims(xyz1, 2)  # batch_size, npoint1, 1, 3
        xyz_diff = xyz2_grouped - xyz1_expanded  # batch_size, npoint1, nsample, 3

        feat2_grouped = group_point(feat2, idx)  # batch_size, npoint1, nsample, channel2
        net = tf.concat([feat2_grouped, xyz_diff], axis=3)  # batch_size, npoint1, nsample, channel2+3

        if mlp is None:
            mlp = []
        for i, num_out_channel in enumerate(mlp):
            net = tf_util.conv2d(net, num_out_channel, [1, 1],
                                 padding='VALID', stride=[1, 1],
                                 bn=True, is_training=is_training,
                                 scope='conv%d' % (i), bn_decay=bn_decay)
        if pooling == 'max':
            feat1_new = tf.reduce_max(net, axis=[2], keep_dims=False, name='maxpool')  # batch_size, npoint1, mlp[-1]
        elif pooling == 'avg':
            feat1_new = tf.reduce_mean(net, axis=[2], keep_dims=False, name='avgpool')  # batch_size, npoint1, mlp[-1]

        if feat1 is not None:
            feat1_new = tf.concat([feat1_new, feat1], axis=2)  # batch_size, npoint1, mlp[-1]+channel1

        feat1_new = tf.expand_dims(feat1_new, 2)  # batch_size, npoint1, 1, mlp[-1]+channel1
        if mlp2 is None:
            mlp2 = []
        for i, num_out_channel in enumerate(mlp2):
            feat1_new = tf_util.conv2d(feat1_new, num_out_channel, [1, 1],
                                       padding='VALID', stride=[1, 1],
                                       bn=True, is_training=is_training,
                                       scope='post-conv%d' % (i), bn_decay=bn_decay)
        feat1_new = tf.squeeze(feat1_new, [2])  # batch_size, npoint1, mlp2[-1]
        return feat1_new
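# Minimal usage sketch for set_upconv_module, assuming a TF1 graph session and
# compiled grouping ops; shapes and channel widths are illustrative assumptions.
# Propagates features from a coarse cloud (256 pts) back onto a dense one (1024 pts).
dense_xyz = tf.constant(np.random.rand(4, 1024, 3).astype('float32'))
coarse_xyz = tf.constant(np.random.rand(4, 256, 3).astype('float32'))
dense_feat = tf.constant(np.random.rand(4, 1024, 64).astype('float32'))
coarse_feat = tf.constant(np.random.rand(4, 256, 128).astype('float32'))
up_feat = set_upconv_module(dense_xyz, coarse_xyz, dense_feat, coarse_feat,
                            nsample=8, mlp=[128, 128], mlp2=[128],
                            is_training=tf.constant(True), scope='up_layer1')
# up_feat: (4, 1024, 128)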
def sample_and_group(npoint, radius, nsample, xyz, points, tnet_spec=None,
                     knn=False, use_xyz=True, centralize_points=False):
    '''
    Input:
        npoint: int32
        radius: float32
        nsample: int32
        xyz: (batch_size, ndataset, 3) TF tensor
        points: (batch_size, ndataset, channel) TF tensor, if None will just use xyz as points
        tnet_spec: dict (keys: mlp, mlp2, is_training, bn_decay), if None do not apply tnet
        knn: bool, if True use kNN instead of radius search
        use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
        centralize_points: bool, if True re-center the first 3 feature channels on the sampled seed points
    Output:
        new_xyz: (batch_size, npoint, 3) TF tensor
        new_points: (batch_size, npoint, nsample, 3+channel) TF tensor
        idx: (batch_size, npoint, nsample) TF tensor, indices of local points as in ndataset points
        grouped_xyz: (batch_size, npoint, nsample, 3) TF tensor, normalized point XYZs
            (subtracted by seed point XYZ) in local regions
    '''
    fpsidx = farthest_point_sample(npoint, xyz)
    new_xyz = gather_point(xyz, fpsidx)  # (batch_size, npoint, 3)
    if knn:
        _, idx = knn_point(nsample, xyz, new_xyz)
    else:
        idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)
    grouped_xyz = group_point(xyz, idx)  # (batch_size, npoint, nsample, 3)
    grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2), [1, 1, nsample, 1])  # translation normalization
    if tnet_spec is not None:
        grouped_xyz = tnet(grouped_xyz, tnet_spec)
    if points is not None:
        grouped_points = group_point(points, idx)  # (batch_size, npoint, nsample, channel)
        if centralize_points:
            central_points = gather_point(points[:, :, :3], fpsidx)
            grouped_points = tf.concat(
                (grouped_points[:, :, :, :3] - tf.tile(tf.expand_dims(central_points, 2), [1, 1, nsample, 1]),
                 grouped_points[:, :, :, 3:]), -1)
        if use_xyz:
            new_points = tf.concat([grouped_xyz, grouped_points], axis=-1)  # (batch_size, npoint, nsample, 3+channel)
        else:
            new_points = grouped_points
    else:
        new_points = grouped_xyz
    return new_xyz, new_points, idx, grouped_xyz
def pointnet_sa_module_msg(xyz, points, npoint, radius_list, nsample_list, mlp_list,
                           scope, use_xyz=True, use_nchw=False):
    ''' PointNet Set Abstraction (SA) module with Multi-Scale Grouping (MSG)
        Input:
            xyz: (batch_size, ndataset, 3) TF tensor
            points: (batch_size, ndataset, channel) TF tensor
            npoint: int32 -- #points sampled in farthest point sampling
            radius_list: list of float32 -- search radius in local region
            nsample_list: list of int32 -- how many points in each local region
            mlp_list: list of list of int32 -- output size for MLP on each point
            use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
            use_nchw: bool, if True, use NCHW data format for conv2d, which is usually faster than NHWC format
        Return:
            new_xyz: (batch_size, npoint, 3) TF tensor
            new_points: (batch_size, npoint, \sum_k{mlp_list[k][-1]}) TF tensor
    '''
    with tf.variable_scope(scope) as sc:
        p1_idx = farthest_point_sample(npoint, xyz)
        new_xyz = gather_point(xyz, p1_idx)
        new_points_list = []
        for i in range(len(radius_list)):
            radius = radius_list[i]
            nsample = nsample_list[i]
            idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)
            grouped_xyz = group_point(xyz, idx)  # b*n*k*3
            grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2), [1, 1, nsample, 1])
            if points is not None:
                grouped_points = group_point(points, idx)
                if use_xyz:
                    grouped_points = tf.concat([grouped_points, grouped_xyz], axis=-1)
            else:
                grouped_points = grouped_xyz
            if use_nchw:
                grouped_points = tf.transpose(grouped_points, [0, 3, 1, 2])
            for j, num_out_channel in enumerate(mlp_list[i]):
                grouped_points = conv2d(grouped_points, num_out_channel, [1, 1],
                                        weight_decay=0, padding='VALID', stride=[1, 1],
                                        scope='conv%d_%d' % (i, j),
                                        activation_fn=tf.nn.leaky_relu)
            if use_nchw:
                grouped_points = tf.transpose(grouped_points, [0, 2, 3, 1])
            new_points = tf.reduce_max(grouped_points, axis=[2])  # b*n*c
            new_points_list.append(new_points)
        new_points_concat = tf.concat(new_points_list, axis=-1)
        return new_xyz, new_points_concat
def sample_and_group(npoint, radius, nsample, xyz, points, tnet_spec=None, knn=False, use_xyz=True):
    '''
    Input:
        npoint: int32
        radius: float32
        nsample: int32
        xyz: (batch_size, ndataset, 3) TF tensor
        points: (batch_size, ndataset, channel) TF tensor, if None will just use xyz as points
        tnet_spec: dict (keys: mlp, mlp2, is_training, bn_decay), if None do not apply tnet
        knn: bool, if True use kNN instead of radius search
        use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
    Output:
        new_xyz: (batch_size, npoint, 3) TF tensor
        new_points: (batch_size, npoint, nsample, 3+channel) TF tensor
        idx: (batch_size, npoint, nsample) TF tensor, indices of local points as in ndataset points
        grouped_xyz: (batch_size, npoint, nsample, 3) TF tensor, normalized point XYZs
            (subtracted by seed point XYZ) in local regions
    Workflow:
        Find the <npoint> down-sampled farthest points by <farthest_point_sample>
        For each down-sampled point, find <nsample> sub-group points by <query_ball_point>
    '''
    new_xyz = gather_point(
        xyz, farthest_point_sample(npoint, xyz)
    )  # (batch_size, npoint, 3) the points sampled by farthest-point sampling
    if knn:
        _, idx = knn_point(nsample, xyz, new_xyz)
    else:
        # [batch_size, npoint, nsample], [batch_size, npoint]
        idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)
    grouped_xyz = group_point(xyz, idx)  # (batch_size, npoint, nsample, 3)
    grouped_xyz -= tf.tile(
        tf.expand_dims(new_xyz, 2),
        [1, 1, nsample, 1])  # translation normalization: subtract the center point
    if tnet_spec is not None:
        grouped_xyz = tnet(grouped_xyz, tnet_spec)
    if points is not None:
        grouped_points = group_point(points, idx)  # (batch_size, npoint, nsample, channel)
        if use_xyz:
            new_points = tf.concat([grouped_xyz, grouped_points], axis=-1)  # (batch_size, npoint, nsample, 3+channel)
        else:
            new_points = grouped_points
    else:
        new_points = grouped_xyz
    return new_xyz, new_points, idx, grouped_xyz
def pointnet_sa_module_msg(xyz, points, npoint, radius_list, nsample_list, mlp_list,
                           is_training, bn_decay, scope, bn=True, use_xyz=True):
    ''' PointNet Set Abstraction (SA) module with Multi-Scale Grouping (MSG)
        Input:
            xyz: (batch_size, ndataset, 3) TF tensor
            points: (batch_size, ndataset, channel) TF tensor
            npoint: int32 -- #points sampled in farthest point sampling
            radius_list: list of float32 -- search radius in local region
            nsample_list: list of int32 -- how many points in each local region
            mlp_list: list of list of int32 -- output size for MLP on each point
            use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
        Return:
            new_xyz: (batch_size, npoint, 3) TF tensor
            new_points: (batch_size, npoint, \sum_k{mlp_list[k][-1]}) TF tensor
    '''
    with tf.variable_scope(scope) as sc:
        new_xyz = gather_point(xyz, farthest_point_sample(npoint, xyz))
        new_points_list = []
        for i in range(len(radius_list)):
            radius = radius_list[i]
            nsample = nsample_list[i]
            idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)
            grouped_xyz = group_point(xyz, idx)
            grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2), [1, 1, nsample, 1])
            if points is not None:
                grouped_points = group_point(points, idx)
                if use_xyz:
                    grouped_points = tf.concat([grouped_points, grouped_xyz], axis=-1)
            else:
                grouped_points = grouped_xyz
            for j, num_out_channel in enumerate(mlp_list[i]):
                grouped_points = tf_util.conv2d(grouped_points, num_out_channel, [1, 1],
                                                padding='VALID', stride=[1, 1],
                                                bn=bn, is_training=is_training,
                                                scope='conv%d_%d' % (i, j), bn_decay=bn_decay)
            new_points = tf.reduce_max(grouped_points, axis=[2])
            new_points_list.append(new_points)
        new_points_concat = tf.concat(new_points_list, axis=-1)
        return new_xyz, new_points_concat
def pointLKPO_group_with_idx(xyz, idx, points, use_xyz=True):
    grouped_xyz = group_point(xyz, idx)  # (batch_size, npoint, 8, 3)
    grouped_xyz -= tf.tile(tf.expand_dims(xyz, 2), [1, 1, 8, 1])  # translation normalization
    if points is not None:
        grouped_points = group_point(points, idx)  # (batch_size, npoint, 8, channel)
        if use_xyz:
            new_points = tf.concat([grouped_xyz, grouped_points], axis=-1)  # (batch_size, npoint, 8, 3+channel)
        else:
            new_points = grouped_points
    else:
        new_points = grouped_xyz
    return xyz, new_points, idx, grouped_xyz
def pointnet_sa_module_msg_rand_tree_triples(xyz, points, npoint, radius_list, nsample_list,
                                             mlp_list, is_training, bn_decay, scope,
                                             nshuffles=1, bn=True, use_xyz=True, use_nchw=False):
    ''' PointNet Set Abstraction (SA) module with Multi-Scale Grouping (MSG)
        Input:
            xyz: (batch_size, ndataset, 3) TF tensor
            points: (batch_size, ndataset, channel) TF tensor
            npoint: int32 -- #points sampled in farthest point sampling
            radius_list: list of float32 -- search radius in local region
            nsample_list: list of int32 -- how many points in each local region
            mlp_list: list of list of int32 -- output size for MLP on each point
            use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
            use_nchw: bool, if True, use NCHW data format for conv2d, which is usually faster than NHWC format
        Return:
            new_xyz: (batch_size, npoint, 3) TF tensor
            new_points: (batch_size, npoint, \sum_k{mlp_list[k][-1]}) TF tensor
    '''
    data_format = 'NCHW' if use_nchw else 'NHWC'
    with tf.variable_scope(scope) as sc:
        new_xyz = gather_point(xyz, farthest_point_sample(npoint, xyz))
        new_points_list = []
        for i in range(len(radius_list)):
            radius = radius_list[i]
            nsample = nsample_list[i]
            idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)
            grouped_xyz = group_point(xyz, idx)
            grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2), [1, 1, nsample, 1])
            if points is not None:
                grouped_points = group_point(points, idx)
                if use_xyz:
                    grouped_points = tf.concat([grouped_points, grouped_xyz], axis=-1)
            else:
                grouped_points = grouped_xyz
            for j, num_out_channel in enumerate(mlp_list[i]):
                # shuffle along the neighbor axis before each conv so that every
                # [1,3] kernel application sees a random triple of neighbors
                grouped_points_tr = tf.transpose(grouped_points, [2, 0, 1, 3])  # bring the points to the first axis
                point_idxs = np.arange(grouped_points_tr.get_shape()[0].value)
                point_idxs = np.resize(point_idxs, (grouped_points_tr.get_shape()[0].value * nshuffles,))
                point_idxs = tf.random.shuffle(point_idxs)
                grouped_points_tr = tf.gather(grouped_points_tr, point_idxs)
                grouped_points = tf.transpose(grouped_points_tr, [1, 2, 0, 3])
                if use_nchw:
                    grouped_points = tf.transpose(grouped_points, [0, 3, 1, 2])
                grouped_points = tf_util.conv2d(grouped_points, num_out_channel, [1, 3],
                                                padding='VALID', stride=[1, 3],
                                                bn=bn, is_training=is_training,
                                                scope='conv%d_%d' % (i, j), bn_decay=bn_decay)
                if use_nchw:
                    grouped_points = tf.transpose(grouped_points, [0, 2, 3, 1])
            new_points = tf.squeeze(grouped_points, [2])
            new_points_list.append(new_points)
        new_points_concat = tf.concat(new_points_list, axis=-1)
        return new_xyz, new_points_concat
def pointnet_sa_module_msg_bkup(xyz, points, npoint, radius_list, nsample_list,
                                mlp_list, is_training, bn_decay, scope, bn=True,
                                use_xyz=True, use_nchw=False):
    ''' PointNet Set Abstraction (SA) module with Multi-Scale Grouping (MSG)
        Input:
            xyz: (batch_size, ndataset, 3) TF tensor
            points: (batch_size, ndataset, channel) TF tensor
            npoint: int32 -- #points sampled in farthest point sampling
            radius_list: list of float32 -- search radius in local region
            nsample_list: list of int32 -- how many points in each local region
            mlp_list: list of list of int32 -- output size for MLP on each point
            use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
            use_nchw: bool, if True, use NCHW data format for conv2d, which is usually faster than NHWC format
        Return:
            new_xyz: (batch_size, npoint, 3) TF tensor
            new_points: (batch_size, npoint, \sum_k{mlp_list[k][-1]}) TF tensor
    '''
    data_format = 'NCHW' if use_nchw else 'NHWC'
    with tf.variable_scope(scope) as sc:
        new_xyz = gather_point(xyz, farthest_point_sample(npoint, xyz))
        new_points_list = []
        for i in range(len(radius_list)):
            radius = radius_list[i]
            nsample = nsample_list[i]
            idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)
            grouped_xyz = group_point(xyz, idx)
            grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2), [1, 1, nsample, 1])
            if points is not None:
                grouped_points = group_point(points, idx)
                if use_xyz:
                    grouped_points = tf.concat([grouped_points, grouped_xyz], axis=-1)
            else:
                grouped_points = grouped_xyz
            if use_nchw:
                grouped_points = tf.transpose(grouped_points, [0, 3, 1, 2])
            for j, num_out_channel in enumerate(mlp_list[i]):
                grouped_points = tf_util.conv2d(grouped_points, num_out_channel, [1, 1],
                                                padding='VALID', stride=[1, 1],
                                                bn=bn, is_training=is_training,
                                                scope='conv%d_%d' % (i, j), bn_decay=bn_decay)
            if use_nchw:
                grouped_points = tf.transpose(grouped_points, [0, 2, 3, 1])
            new_points = tf.reduce_max(grouped_points, axis=[2])
            new_points_list.append(new_points)
        new_points_concat = tf.concat(new_points_list, axis=-1)
        return new_xyz, new_points_concat
def pointSIFT_group_four(radius, xyz, points, use_xyz=True):
    idx = pointSIFT_select_four(xyz, radius)
    grouped_xyz = group_point(xyz, idx)  # (batch_size, npoint, 32, 3)
    grouped_xyz -= tf.tile(tf.expand_dims(xyz, 2), [1, 1, 32, 1])  # translation normalization
    if points is not None:
        grouped_points = group_point(points, idx)  # (batch_size, npoint, 32, channel)
        if use_xyz:
            new_points = tf.concat([grouped_xyz, grouped_points], axis=-1)  # (batch_size, npoint, 32, 3+channel)
        else:
            new_points = grouped_points
    else:
        new_points = grouped_xyz
    return xyz, new_points, idx, grouped_xyz
def sample_and_group(npoint, radius, nsample, xyz, points, knn=False, use_xyz=True):
    '''
    Input:
        npoint: int32 = 1024
        radius: float32 = 0.5, 1, 2, 4
        nsample: int32 = 16
        xyz: (batch_size, ndataset, 3) TF tensor
        points: (batch_size, ndataset, channel) TF tensor, if None will just use xyz as points
        knn: bool, if True use kNN instead of radius search
        use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
    Output:
        new_xyz: (batch_size, npoint, 3) TF tensor
        new_points: (batch_size, npoint, nsample, 3+channel) TF tensor
        idx: (batch_size, npoint, nsample) TF tensor, indices of local points as in ndataset points
        grouped_xyz: (batch_size, npoint, nsample, 3) TF tensor, normalized point XYZs
            (subtracted by seed point XYZ) in local regions
    '''
    new_xyz = gather_point(xyz, farthest_point_sample(npoint, xyz))  # sampling using farthest point sampling
    # import ipdb; ipdb.set_trace()
    print('check for seg fault')
    # xyz.shape      TensorShape([Dimension(4), Dimension(2048), Dimension(3)])
    # new_xyz.shape  TensorShape([Dimension(4), Dimension(1024), Dimension(3)])
    if knn:
        _, idx = knn_point(nsample, xyz, new_xyz)
    else:
        idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)  # grouping using ball query
    grouped_xyz = group_point(xyz, idx)  # (batch_size, npoint, nsample, 3) grouped coordinates
    grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2), [1, 1, nsample, 1])  # translation normalization
    if points is not None:
        grouped_points = group_point(points, idx)  # (batch_size, npoint, nsample, channel) grouped features
        if use_xyz:
            new_points = tf.concat([grouped_xyz, grouped_points], axis=-1)  # (batch_size, npoint, nsample, 3+channel)
        else:
            new_points = grouped_points
    else:
        new_points = grouped_xyz
    return new_xyz, new_points, idx, grouped_xyz
def sample_and_group(npoint, radius, nsample, xyz, points, knn=False, use_xyz=True):
    '''
    Input:
        npoint: int32
        radius: float32
        nsample: int32
        xyz: (batch_size, ndataset, 3) TF tensor
        points: (batch_size, ndataset, channel) TF tensor, if None will just use xyz as points
        knn: bool, if True use kNN instead of radius search
        use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
    Output:
        new_xyz: (batch_size, npoint, 3) TF tensor
        new_points: (batch_size, npoint, nsample, 3+channel) TF tensor
        idx: (batch_size, npoint, nsample) TF tensor, indices of local points as in ndataset points
        grouped_xyz: (batch_size, npoint, nsample, 3) TF tensor, normalized point XYZs
            (subtracted by seed point XYZ) in local regions
    '''
    # tf_ops/samples/tf_sampling.py
    new_xyz = gather_point(xyz, farthest_point_sample(npoint, xyz))  # (batch_size, npoint, 3), pick the npoint (e.g. 512) sampled seeds
    if knn:
        _, idx = knn_point(nsample, xyz, new_xyz)
    else:
        idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)  # indices assigning each seed a cluster of nsample (e.g. 32) neighbors
    grouped_xyz = group_point(xyz, idx)  # (batch_size, npoint, nsample, 3), gather the nsample neighbors of each seed
    grouped_xyz -= tf.tile(
        tf.expand_dims(new_xyz, 2), [1, 1, nsample, 1]
    )  # translation normalization: expand (bs, npoint, 3) -> (bs, npoint, 1, 3), tile to (bs, npoint, nsample, 3), then subtract
    if points is not None:
        grouped_points = group_point(points, idx)  # (batch_size, npoint, nsample, channel)
        if use_xyz:  # whether to keep the raw xyz spatial information
            new_points = tf.concat([grouped_xyz, grouped_points], axis=-1)  # (batch_size, npoint, nsample, 3+channel)
        else:
            new_points = grouped_points
    else:
        new_points = grouped_xyz
    return new_xyz, new_points, idx, grouped_xyz
def sample_and_group(npoint, radius, nsample, xyz, points, tnet_spec=None, knn=False, use_xyz=True):
    '''
    Input:
        npoint: int32
        radius: float32, or a (batch_size,) tensor of per-example radii
        nsample: int32
        xyz: (batch_size, ndataset, 3) TF tensor
        points: (batch_size, ndataset, channel) TF tensor, if None will just use xyz as points
        tnet_spec: dict (keys: mlp, mlp2, is_training, bn_decay), if None do not apply tnet
        knn: bool, if True use kNN instead of radius search
        use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
    Output:
        new_xyz: (batch_size, npoint, 3) TF tensor
        new_points: (batch_size, npoint, nsample, 3+channel) TF tensor
        idx: (batch_size, npoint, nsample) TF tensor, indices of local points as in ndataset points
        grouped_xyz: (batch_size, npoint, nsample, 3) TF tensor, normalized point XYZs
            (subtracted by seed point XYZ) in local regions
    '''
    new_xyz = gather_point(xyz, farthest_point_sample(npoint, xyz))  # (batch_size, npoint, 3)
    if knn:
        _, idx = knn_point(nsample, xyz, new_xyz)
    else:
        if np.isscalar(radius):
            idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)
        else:
            # per-example radius: run the ball query one batch element at a time
            idx_list = []
            for radius_one, xyz_one, new_xyz_one in zip(tf.unstack(radius, axis=0),
                                                        tf.unstack(xyz, axis=0),
                                                        tf.unstack(new_xyz, axis=0)):
                idx_one, _ = query_ball_point(radius_one, nsample,
                                              tf.expand_dims(xyz_one, axis=0),
                                              tf.expand_dims(new_xyz_one, axis=0))
                idx_list.append(idx_one)
            idx = tf.stack(idx_list, axis=0)
            idx = tf.squeeze(idx, axis=1)
    grouped_xyz = group_point(xyz, idx)  # (batch_size, npoint, nsample, 3)
    grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2), [1, 1, nsample, 1])  # translation normalization
    if tnet_spec is not None:
        grouped_xyz = tnet(grouped_xyz, tnet_spec)
    if points is not None:
        grouped_points = group_point(points, idx)  # (batch_size, npoint, nsample, channel)
        if use_xyz:
            # new_points = tf.concat([grouped_xyz, tf.tile(tf.expand_dims(new_xyz, 2), [1,1,nsample,1]), grouped_points], axis=-1)
            new_points = tf.concat([grouped_xyz, grouped_points], axis=-1)  # (batch_size, npoint, nsample, 3+channel)
        else:
            new_points = grouped_points
    else:
        # new_points = tf.concat([grouped_xyz, tf.tile(tf.expand_dims(new_xyz, 2), [1,1,nsample,1])], axis=-1)
        new_points = grouped_xyz
    return new_xyz, new_points, idx, grouped_xyz
def softmax_embedding(xyz1, xyz2, feat1, feat2, radius, nsample, mlp, is_training,
                      bn_decay, scope, bn=True, knn=True, corr_func='concat'):
    if knn:
        _, idx = knn_point(nsample, xyz2, xyz1)
    else:
        idx, _ = query_ball_point(radius, nsample, xyz2, xyz1)
    xyz2_grouped = group_point(xyz2, idx)  # batch_size, npoint, nsample, 3
    xyz1_expanded = tf.expand_dims(xyz1, 2)  # batch_size, npoint, 1, 3
    xyz_diff = xyz2_grouped - xyz1_expanded  # batch_size, npoint, nsample, 3
    feat2_grouped = group_point(feat2, idx)  # batch_size, npoint, nsample, channel
    feat1_expanded = tf.expand_dims(feat1, 2)  # batch_size, npoint, 1, channel
    feat_diff = feat2_grouped - feat1_expanded
    feat_diff = tf.concat(axis=-1, values=[feat_diff, feat2_grouped,
                                           tf.tile(feat1_expanded, [1, 1, nsample, 1])])  # batch_size, npoint, nsample, channel*3
    feat1_new = tf.concat([feat_diff, xyz_diff], axis=3)  # batch_size, npoint, nsample, channel*3 + 3
    # TODO: move scope to outer indent
    o = []
    with tf.variable_scope(scope) as sc:
        for i, num_out_channel in enumerate(mlp):
            activation_fn = tf.nn.relu if i < len(mlp) - 1 else None
            feat1_new = tf_util.conv2d(feat1_new, num_out_channel, [1, 1],
                                       padding='VALID', stride=[1, 1],
                                       bn=True, is_training=is_training,
                                       # activation_fn=activation_fn,
                                       scope='conv_diff_%d' % (i), bn_decay=bn_decay)
            o.append(feat1_new)
    feat1_new += 0.00001
    feat1_new = tf.squeeze(feat1_new, [3])  # batch_size, npoint1, nsample
    square = feat1_new  # tf.square(feat1_new)
    # normalize the per-neighbor scores so they sum to 1 over the nsample axis
    sm = square / tf.expand_dims(tf.reduce_sum(square, axis=-1), axis=-1)
    # sm = tf.nn.softmax(feat1_new)  # batch_size, npoint1, nsample
    sm = tf.expand_dims(sm, axis=-1)
    flow_new = xyz_diff * sm  # batch_size, npoint, nsample, 3
    flow_new = tf.reduce_sum(flow_new, axis=-2)  # batch_size, npoint, 3
    # feat_new = feat2_grouped * sm  # batch_size, npoint, nsample, channel
    # feat_new = tf.reduce_sum(feat_new, axis=-2)  # batch_size, npoint, channel
    return xyz1, flow_new, sm, idx, o, feat_diff, xyz_diff
def sample_and_group(npoint, radius, nsample, xyz, points):
    new_xyz = gather_point(xyz, farthest_point_sample(npoint, xyz))  # (batch_size, npoint, 3)
    idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)
    grouped_xyz = group_point(xyz, idx)  # (batch_size, npoint, nsample, 3)
    grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2), [1, 1, nsample, 1])  # translation normalization
    if points is not None:
        grouped_points = group_point(points, idx)  # (batch_size, npoint, nsample, channel)
        new_points = tf.concat([grouped_xyz, grouped_points], axis=-1)  # (batch_size, npoint, nsample, 3+channel)
    else:
        new_points = grouped_xyz
    return new_xyz, new_points, idx, grouped_xyz
def pc_sampling(xyz, feat, nsample, num_point, scope='sampling'):
    """ Point cloud downsampling: farthest-point sample num_point centers,
    then max-pool each center's feature over its nsample nearest neighbors.
    Args:
        xyz: 3-D tensor B x N x 3
        nsample: k
        num_point: N2
        feat: 3-D tensor B x N x C
    Returns:
        feat_sample: 3-D tensor B x N2 x C
        xyz_new: 3-D tensor B x N2 x 3
    """
    with tf.variable_scope(scope) as sc:
        xyz_new = gather_point(xyz, farthest_point_sample(num_point, xyz))
        _, idx_pooling = knn_point(nsample, xyz, xyz_new)

        grouped_points = group_point(feat, idx_pooling)
        feat_sample = tf.nn.max_pool(grouped_points, [1, 1, nsample, 1], [1, 1, 1, 1],
                                     padding='VALID', data_format='NHWC',
                                     name="MAX_POOLING")
        feat_sample = tf.squeeze(feat_sample, axis=[2])

    return feat_sample, xyz_new
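# Minimal usage sketch for pc_sampling, assuming a TF1 graph session and the
# compiled FPS/kNN/grouping ops; the shapes below are illustrative assumptions.
feat_in = tf.constant(np.random.rand(2, 2048, 32).astype('float32'))
xyz_in = tf.constant(np.random.rand(2, 2048, 3).astype('float32'))
# Downsample 2048 -> 512 points, max-pooling features over 16 nearest neighbors.
feat_ds, xyz_ds = pc_sampling(xyz_in, feat_in, nsample=16, num_point=512, scope='pool1')
# feat_ds: (2, 512, 32); xyz_ds: (2, 512, 3)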
def grouping(feature, K, src_xyz, q_xyz, use_xyz=True, use_knn=True, radius=0.2):
    '''
    K: neighbor size
    src_xyz: original point xyz (batch_size, ndataset, 3)
    q_xyz: query point xyz (batch_size, npoint, 3)
    '''
    batch_size = src_xyz.get_shape()[0]
    npoint = q_xyz.get_shape()[1]

    if use_knn:
        point_indices = tf.py_func(knn_query, [K, src_xyz, q_xyz], tf.int32)
        batch_indices = tf.tile(tf.reshape(tf.range(batch_size), (-1, 1, 1, 1)), (1, npoint, K, 1))
        idx = tf.concat([batch_indices, tf.expand_dims(point_indices, axis=3)], axis=3)
        idx.set_shape([batch_size, npoint, K, 2])
        grouped_xyz = tf.gather_nd(src_xyz, idx)
        grouped_feature = tf.gather_nd(feature, idx)
    else:
        point_indices, _ = tf_grouping.query_ball_point(radius, K, src_xyz, q_xyz)
        grouped_xyz = tf_grouping.group_point(src_xyz, point_indices)
        # group the features with the same ball-query indices (the original code
        # referenced `idx` here, which is only defined on the kNN branch)
        grouped_feature = tf_grouping.group_point(feature, point_indices)
        # note: idx is (B, npoint, K, 2) gather_nd indices on the kNN branch but
        # (B, npoint, K) group_point indices on this branch
        idx = point_indices

    if use_xyz:
        grouped_feature = tf.concat([grouped_xyz, grouped_feature], axis=-1)

    return grouped_xyz, grouped_feature, idx
def group(xyz, idx):
    grouped_xyz = group_point(xyz, idx)
    # tile each center point to its neighbors (neighborhood size is hard-coded to 32)
    xyz_central = tf.tile(tf.expand_dims(xyz, 2), [1, 1, 32, 1])
    a = grouped_xyz - xyz_central  # relative coordinates w.r.t. the center
    new_points = tf.concat([xyz_central, a], axis=-1)
    return new_points, a
def get_model(sample_num, sample_scale, point_cloud, is_training, filter_sizes, filter_num, bn_decay=None):
    '''
    Input:
        sample_num: int32; sample M points from originally N points.
        sample_scale: []; find Ki points from sampled points' neighbours
    '''
    batch_size = point_cloud.get_shape()[0].value
    feature_collection = []
    channels = [32, 64, 128]

    M_sampled_points = farthest_point_sample(sample_num, point_cloud)
    # [batch, sample_num, 3]
    new_xyz = gather_point(point_cloud, M_sampled_points)

    for i, scale in enumerate(sample_scale):
        # [batch, sample_num, scale]
        _, idx = knn_point(scale, point_cloud, new_xyz)
        # [batch, sample_num, scale, 3]
        points_features = group_point(point_cloud, idx)
        for j, channel in enumerate(channels):
            # [batch, sample_num, scale, channel]
            points_features = tf_util.conv2d(points_features, channel, [1, 1],
                                             padding='VALID', stride=[1, 1],
                                             bn=True, is_training=is_training,
                                             scope='conv_%d_%d' % (i, j),
                                             bn_decay=bn_decay, data_format='NHWC')
        # [batch, sample_num, 1, 128]
        points_features = tf.reduce_max(points_features, axis=[2], keep_dims=True, name='maxpool')
        # [batch, sample_num, 128]
        points_features = tf.squeeze(points_features, [2])
        # [batch, sample_num, 1, 128]
        points_features = tf.expand_dims(points_features, 2)
        # [batch * sample_num, 1, 128]
        points_features = tf.reshape(points_features, [batch_size * sample_num, 1, channels[-1]])
        feature_collection.append(points_features)

    # [batch * sample_num, len(sample_scale), 128]
    textcnn_embedding = tf.concat(feature_collection, 1)
    # [batch * sample_num, feature_size = 128]
    textcnn_encoded = get_textcnn_model(textcnn_embedding, filter_sizes, filter_num, is_training, bn_decay)
    # [batch, sample_num, feature_size]
    textcnn_encoded = tf.reshape(textcnn_encoded, [batch_size, sample_num, -1])
    # [batch, sample_num, 1, feature_size]
    global_feature = tf.expand_dims(textcnn_encoded, 2)

    channels = [256, 512, 1024]
    for i, channel in enumerate(channels):
        # [batch, sample_num, 1, channel]
        global_feature = tf_util.conv2d(global_feature, channel, [1, 1],
                                        padding='VALID', stride=[1, 1],
                                        bn=True, is_training=is_training,
                                        scope='feature_aggregation_conv_%d' % (i),
                                        bn_decay=bn_decay, data_format='NHWC')
    # [batch, 1, 1, 1024]
    global_feature = tf.reduce_max(global_feature, axis=[1], keep_dims=True, name='global_feature_maxpool')
    # [batch, 1024]
    global_feature = tf.reshape(global_feature, [batch_size, -1])

    classify_feature = tf_util.fully_connected(global_feature, 512, bn=True, is_training=is_training,
                                               scope='fc1', bn_decay=bn_decay)
    classify_feature = tf_util.dropout(classify_feature, keep_prob=0.4, is_training=is_training, scope='dp1')
    classify_feature = tf_util.fully_connected(classify_feature, 256, bn=True, is_training=is_training,
                                               scope='fc2', bn_decay=bn_decay)
    classify_feature = tf_util.dropout(classify_feature, keep_prob=0.4, is_training=is_training, scope='dp2')
    classify_feature = tf_util.fully_connected(classify_feature, 40, activation_fn=None, scope='fc3')

    return classify_feature
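# Hypothetical driver for get_model, assuming a TF1 graph session and that
# get_textcnn_model and tf_util are importable; sample_num, sample_scale,
# filter_sizes, and filter_num below are illustrative guesses, not original values.
pc_pl = tf.compat.v1.placeholder(tf.float32, (8, 1024, 3))
is_training_pl = tf.compat.v1.placeholder(tf.bool, ())
logits = get_model(sample_num=256, sample_scale=[16, 32, 64],
                   point_cloud=pc_pl, is_training=is_training_pl,
                   filter_sizes=[1, 2, 3], filter_num=128)
# logits: (8, 40) class scores (the final FC layer is hard-coded to 40 classes)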
def get_repulsion_loss(pred, nsample=20, radius=0.07, knn=False, use_l1=False, h=0.001):
    if knn:
        _, idx = knn_point_2(nsample, pred, pred)
        pts_cnt = tf.constant(nsample, shape=(30, 1024))  # hard-coded (batch_size, npoint)
    else:
        idx, pts_cnt = query_ball_point(radius, nsample, pred, pred)
    tf.summary.histogram('smooth/unque_index', pts_cnt)

    grouped_pred = group_point(pred, idx)  # (batch_size, npoint, nsample, 3)
    grouped_pred -= tf.expand_dims(pred, 2)

    # get the repulsion loss from distances to the nearest neighbors
    if use_l1:
        dists = tf.reduce_sum(tf.abs(grouped_pred), axis=-1)
    else:
        dists = tf.reduce_sum(grouped_pred ** 2, axis=-1)

    val, idx = tf.nn.top_k(-dists, 5)
    val = val[:, :, 1:]  # remove the first one (the point itself)

    if use_l1:
        h = np.sqrt(h) * 2
    print("h is ", h)

    val = tf.maximum(0.0, h + val)  # dd/np.sqrt(n)
    repulsion_loss = tf.reduce_mean(val)
    return repulsion_loss
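# Quick numerical sanity check for get_repulsion_loss, assuming a TF1 graph
# session and compiled grouping ops; the shapes are illustrative. With the
# default use_l1=False, a neighbor pair is penalized when its squared distance
# is below h, so a tightly clustered cloud should score higher than a spread one.
clustered = tf.constant((np.random.rand(4, 1024, 3) * 0.01).astype('float32'))
spread = tf.constant(np.random.rand(4, 1024, 3).astype('float32'))
loss_tight = get_repulsion_loss(clustered)
loss_spread = get_repulsion_loss(spread)
with tf.compat.v1.Session() as sess:
    print(sess.run([loss_tight, loss_spread]))  # expect loss_tight > loss_spread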
def get_perulsion_loss(pred, nsample=15, radius=0.07, knn=False, numpoint=512, use_l1=False):
    # pred: (batch_size, npoint, 3)
    if knn:
        with tf.device('/gpu:1'):
            _, idx = knn_point_2(nsample, pred, pred)
        pts_cnt = tf.constant(nsample, shape=(30, numpoint))
    else:
        idx, pts_cnt = query_ball_point(radius, nsample, pred, pred)
    tf.summary.histogram('smooth/unque_index', pts_cnt)

    grouped_pred = group_point(pred, idx)  # (batch_size, npoint, nsample, 3)
    grouped_pred -= tf.expand_dims(pred, 2)

    # get the repulsion loss
    dists = tf.reduce_sum(grouped_pred ** 2, axis=-1)
    if use_l1:
        dists = tf.sqrt(dists + 1e-12)
    val, idx = tf.nn.top_k(-dists, 5)
    val = val[:, :, 1:]  # remove the first one (the point itself)
    if use_l1:
        h = np.sqrt(0.001) * 2
    else:
        h = 0.01
    print("h is ", h)
    val = tf.maximum(0.0, h + val)  # dd/np.sqrt(n)
    perulsion_loss = tf.reduce_mean(val)
    return perulsion_loss
def kernel_density_estimation_ball(pts, radius, sigma, N_points=128, is_norm=False):
    with tf.variable_scope("ComputeDensity") as sc:
        idx, pts_cnt = tf_grouping.query_ball_point(radius, N_points, pts, pts)
        g_pts = tf_grouping.group_point(pts, idx)
        g_pts -= tf.tile(tf.expand_dims(pts, 2), [1, 1, N_points, 1])

        # isotropic Gaussian kernel with per-axis variance sigma (bandwidth R = sqrt(sigma))
        R = tf.sqrt(sigma)
        xRinv = tf.div(g_pts, R)
        quadform = tf.reduce_sum(tf.square(xRinv), axis=-1)
        logsqrtdetSigma = tf.log(R) * 3
        mvnpdf = tf.exp(-0.5 * quadform - logsqrtdetSigma - 3 * tf.log(2 * 3.1415926) / 2)

        # query_ball_point pads incomplete neighborhoods by repeating the first index,
        # so subtract the duplicated contributions before averaging
        first_val, _ = tf.split(mvnpdf, [1, N_points - 1], axis=2)
        mvnpdf = tf.reduce_sum(mvnpdf, axis=2, keepdims=True)
        num_val_to_sub = tf.expand_dims(tf.cast(tf.subtract(N_points, pts_cnt), dtype=tf.float32), axis=-1)
        val_to_sub = tf.multiply(first_val, num_val_to_sub)
        mvnpdf = tf.subtract(mvnpdf, val_to_sub)

        scale = tf.div(1.0, tf.expand_dims(tf.cast(pts_cnt, dtype=tf.float32), axis=-1))
        density = tf.multiply(mvnpdf, scale)

        if is_norm:
            # grouped_xyz_sum = tf.reduce_sum(grouped_xyz, axis=1, keepdims=True)
            density_max = tf.reduce_max(density, axis=1, keepdims=True)
            density = tf.div(density, density_max)

        return density
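# Reference check for the Gaussian kernel used above. With R = sqrt(sigma),
# exp(-0.5*quadform - logsqrtdetSigma - 3*log(2*pi)/2) equals the isotropic
# 3-D normal pdf exp(-||x||^2 / (2*sigma)) / (2*pi*sigma)^(3/2). A hypothetical
# numpy helper for verifying one offset vector:
import numpy as np

def gaussian3d_pdf(x, sigma):
    # matches the TF expression above for a single 3-vector x
    return np.exp(-0.5 * np.dot(x, x) / sigma) / np.power(2 * np.pi * sigma, 1.5)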
def get_uniform_loss(pcd, percentages=[0.004, 0.006, 0.008, 0.010, 0.012], radius=1.0):
    B, N, C = pcd.get_shape().as_list()
    npoint = int(N * 0.05)
    loss = []
    for p in percentages:
        nsample = int(N * p)
        r = math.sqrt(p * radius)
        disk_area = math.pi * (radius ** 2) * p / nsample
        # print(npoint, nsample)
        new_xyz = gather_point(pcd, farthest_point_sample(npoint, pcd))  # (batch_size, npoint, 3)
        idx, pts_cnt = query_ball_point(r, nsample, pcd, new_xyz)  # (batch_size, npoint, nsample)

        # expect_len = tf.sqrt(2*disk_area/1.732)  # using hexagon
        expect_len = tf.sqrt(disk_area)  # using square

        grouped_pcd = group_point(pcd, idx)
        grouped_pcd = tf.concat(tf.unstack(grouped_pcd, axis=1), axis=0)

        var, _ = knn_point(2, grouped_pcd, grouped_pcd)
        uniform_dis = -var[:, :, 1:]
        uniform_dis = tf.sqrt(tf.abs(uniform_dis + 1e-8))
        uniform_dis = tf.reduce_mean(uniform_dis, axis=[-1])
        uniform_dis = tf.square(uniform_dis - expect_len) / (expect_len + 1e-8)
        uniform_dis = tf.reshape(uniform_dis, [-1])

        mean, variance = tf.nn.moments(uniform_dis, axes=0)
        mean = mean * math.pow(p * 100, 2)

        loss.append(mean)
    return tf.add_n(loss) / len(percentages)
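# Minimal usage sketch for get_uniform_loss, assuming a TF1 graph session and
# compiled ops; the batch/point counts are illustrative. The loss compares each
# local nearest-neighbor spacing against sqrt(disk_area), the expected spacing
# if nsample points were spread uniformly over the query disk.
pcd_demo = tf.constant(np.random.rand(4, 4096, 3).astype('float32'))
demo_uniform_loss = get_uniform_loss(pcd_demo)
with tf.compat.v1.Session() as sess:
    print(sess.run(demo_uniform_loss))  # a single scalar; lower means more uniform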
def pointnet_sa_module_msg(xyz, points, npoint, radius_list, nsample_list, mlp_list,
                           is_training, bn_decay, scope, bn=True, use_xyz=True, use_nchw=False):
    ''' PointNet Set Abstraction (SA) module with Multi-Scale Grouping (MSG)
        Input:
            xyz: (batch_size, ndataset, 3) TF tensor
            points: (batch_size, ndataset, channel) TF tensor
            npoint: int32 -- #points sampled in farthest point sampling
            radius_list: list of float32 -- search radius in local region
            nsample_list: list of int32 -- how many points in each local region
            mlp_list: list of list of int32 -- output size for MLP on each point
            use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
            use_nchw: bool, if True, use NCHW data format for conv2d, which is usually faster than NHWC format
        Return:
            new_xyz: (batch_size, npoint, 3) TF tensor
            new_points: (batch_size, npoint, \sum_k{mlp_list[k][-1]}) TF tensor
    '''
    data_format = 'NCHW' if use_nchw else 'NHWC'
    with tf.variable_scope(scope) as sc:
        new_xyz = gather_point(xyz, farthest_point_sample(npoint, xyz))
        new_points_list = []
        for i in range(len(radius_list)):
            radius = radius_list[i]
            nsample = nsample_list[i]
            idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)
            grouped_xyz = group_point(xyz, idx)
            grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2), [1, 1, nsample, 1])
            if points is not None:
                grouped_points = group_point(points, idx)
                if use_xyz:
                    grouped_points = tf.concat([grouped_points, grouped_xyz], axis=-1)
            else:
                grouped_points = grouped_xyz
            if use_nchw:
                grouped_points = tf.transpose(grouped_points, [0, 3, 1, 2])
            for j, num_out_channel in enumerate(mlp_list[i]):
                grouped_points = tf_util.conv2d(grouped_points, num_out_channel, [1, 1],
                                                padding='VALID', stride=[1, 1],
                                                bn=bn, is_training=is_training,
                                                scope='conv%d_%d' % (i, j), bn_decay=bn_decay)
            if use_nchw:
                grouped_points = tf.transpose(grouped_points, [0, 2, 3, 1])
            new_points = tf.reduce_max(grouped_points, axis=[2])
            new_points_list.append(new_points)
        new_points_concat = tf.concat(new_points_list, axis=-1)
        return new_xyz, new_points_concat
def test_grad(self):
    with tf.device('/gpu:0'):
        points = tf.constant(np.random.random((1, 128, 16)).astype('float32'))
        print(points)
        xyz1 = tf.constant(np.random.random((1, 128, 3)).astype('float32'))
        xyz2 = tf.constant(np.random.random((1, 8, 3)).astype('float32'))
        radius = 0.3
        nsample = 32
        idx, pts_cnt = query_ball_point(radius, nsample, xyz1, xyz2)
        grouped_points = group_point(points, idx)
        print(grouped_points)
    with self.test_session():
        print("---- Going to compute gradient error")
        err = tf.test.compute_gradient_error(points, (1, 128, 16), grouped_points, (1, 8, 32, 16))
        print(err)
        self.assertLess(err, 1e-4)