def pointSIFT_KNN(radius, xyz):
    """Gather pointSIFT neighborhoods and build centered neighbor features.

    Args:
        radius: search radius forwarded to pointSIFT_select_four.
        xyz: (batch_size, npoint, 3) point coordinates.

    Returns:
        new_points: (batch_size, npoint, 32, 6) tensor holding, per neighbor,
            [center_xyz, neighbor_xyz - center_xyz].
        idx: neighbor indices produced by pointSIFT_select_four.

    NOTE(review): assumes pointSIFT_select_four yields 32 indices per query
    point (the tile factor below) — confirm against its implementation.
    """
    idx = pointSIFT_select_four(xyz, radius)
    # Neighbor coordinates: (batch_size, npoint, 32, 3).
    neighbors = group_point(xyz, idx)
    # Broadcast each query point across its 32 neighbors.
    centers = tf.tile(tf.expand_dims(xyz, 2), [1, 1, 32, 1])
    # Translation normalization: express neighbors relative to their center.
    offsets = neighbors - centers
    new_points = tf.concat([centers, offsets], axis=-1)
    return new_points, idx
def pointSIFT_group_four(radius, xyz, points, use_xyz=True):
    """Group pointSIFT neighborhoods and optionally attach feature channels.

    Args:
        radius: search radius forwarded to pointSIFT_select_four.
        xyz: (batch_size, npoint, 3) point coordinates.
        points: per-point feature tensor, or None to use coordinates only.
        use_xyz: when True and points is given, prepend the normalized
            coordinates to the grouped features.

    Returns:
        xyz: the input coordinates, passed through unchanged.
        new_points: grouped features for each neighborhood.
        idx: neighbor indices from pointSIFT_select_four.
        grouped_xyz: translation-normalized neighbor coordinates.
    """
    idx = pointSIFT_select_four(xyz, radius)
    grouped_xyz = group_point(xyz, idx)  # (batch_size, npoint, 32, 3)
    # Translation normalization: subtract each center from its neighbors.
    center = tf.tile(tf.expand_dims(xyz, 2), [1, 1, 32, 1])
    grouped_xyz = grouped_xyz - center
    if points is None:
        new_points = grouped_xyz
    else:
        grouped_points = group_point(points, idx)  # (batch_size, npoint, 8/32, channel)
        if use_xyz:
            # (batch_size, npoint, 8/32, 3 + channel)
            new_points = tf.concat([grouped_xyz, grouped_points], axis=-1)
        else:
            new_points = grouped_points
    return xyz, new_points, idx, grouped_xyz
def get_model(point_input, is_training, pfs_flag=False, bn_decay=None):
    """Classification PointNet-style network: input BxNxC, output Bx40 logits.

    Three stages of attention convolutions with 4x attention-pooling
    downsampling between them; multi-scale skip features are concatenated,
    aggregated into a 1024-d global embedding, then classified by an MLP head.

    Args:
        point_input: (batch_size, num_point, channels) input point cloud.
        is_training: bool tensor/flag controlling batch norm and dropout.
        pfs_flag: unused; kept for interface compatibility with callers.
        bn_decay: batch-norm decay schedule forwarded to tf_util layers.

    Returns:
        net: (batch_size, 40) classification logits.
        end_points: dict exposing the global 'embedding' tensor.
    """
    batch_size = point_input.get_shape()[0].value
    num_point1 = point_input.get_shape()[1].value
    # Two levels of 4x downsampling for the attention-pooling stages.
    num_point2 = int(np.floor(num_point1 / 4.0))
    num_point3 = int(np.floor(num_point2 / 4.0))
    end_points = {}
    point_cloud1 = point_input

    # --- Stage 1: attention convs on the full-resolution cloud ---
    k = 32
    nn_idx = pointSIFT_select_four(point_cloud1, 0.2)
    net1_1 = tf_util.attention_conv(point_cloud1, point_input, 64, nn_idx, k,
                                    scope='conv_1_1', bn=True,
                                    bn_decay=bn_decay, is_training=is_training)
    net1_2 = tf_util.attention_conv(point_cloud1, net1_1, 64, nn_idx, k,
                                    scope='conv_1_2', bn=True,
                                    bn_decay=bn_decay, is_training=is_training)

    # Downsample to num_point2 points.
    k = 30
    net, p1_idx, pn_idx, point_cloud2 = tf_util.attention_pooling(
        net1_2, point_cloud1, num_point2, k, scope='12',
        bn_decay=bn_decay, is_training=is_training)
    # Carry stage-1 features to the surviving points: max over each point's
    # pooled neighborhood.
    # NOTE(review): tf.squeeze drops ALL size-1 dims; with batch_size == 1
    # this would also remove the batch axis — confirm intended shapes.
    net1_1 = tf.squeeze(
        tf.reduce_max(group_point(net1_1, pn_idx), axis=-2, keepdims=True))
    net1_2 = net

    # --- Stage 2 ---
    k = 16
    nn_idx = pointSIFT_select_two(point_cloud2, 0.4)
    net2_1 = tf_util.attention_conv(point_cloud2, net, 128, nn_idx, k,
                                    scope='conv_2_1', bn=True,
                                    bn_decay=bn_decay, is_training=is_training)
    net2_2 = tf_util.attention_conv(point_cloud2, net2_1, 128, nn_idx, k,
                                    scope='conv_2_2', bn=True,
                                    bn_decay=bn_decay, is_training=is_training)

    # Downsample to num_point3 points and carry all earlier skip features down.
    k = 30
    net, p2_idx, pn_idx, point_cloud3 = tf_util.attention_pooling(
        net2_2, point_cloud2, num_point3, k, scope='13',
        bn_decay=bn_decay, is_training=is_training)
    net1_1 = tf.reduce_max(group_point(net1_1, pn_idx), axis=-2, keepdims=True)
    net1_2 = tf.reduce_max(group_point(net1_2, pn_idx), axis=-2, keepdims=True)
    net2_1 = tf.reduce_max(group_point(net2_1, pn_idx), axis=-2, keepdims=True)
    net2_2 = net

    # --- Stage 3 ---
    k = 16
    nn_idx = pointSIFT_select_two(point_cloud3, 0.6)
    net3_1 = tf_util.attention_conv(point_cloud3, net, 256, nn_idx, k,
                                    scope='conv_3_1', bn=True,
                                    bn_decay=bn_decay, is_training=is_training)
    net3_2 = tf_util.attention_conv(point_cloud3, net3_1, 256, nn_idx, k,
                                    scope='conv_3_2', bn=True,
                                    bn_decay=bn_decay, is_training=is_training)

    # Align ranks so every skip feature can be concatenated on channels.
    net3_1 = tf.expand_dims(net3_1, axis=-2)
    net3_2 = tf.expand_dims(net3_2, axis=-2)
    net2_2 = tf.expand_dims(net2_2, axis=-2)
    net = tf.concat([net1_1, net1_2, net2_1, net2_2, net3_1, net3_2], axis=-1)

    # 1x1 conv to 1024-d, then global max pool over points -> embedding.
    net = tf_util.conv2d(net, 1024, [1, 1], padding='VALID', stride=[1, 1],
                         activation_fn=tf.nn.relu, bn=True,
                         is_training=is_training, scope='agg',
                         bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=1, keepdims=True)
    net = tf.reshape(net, [batch_size, -1])
    end_points['embedding'] = net

    # Classifier head: FC(512) -> dropout -> FC(256) -> dropout -> FC(40).
    net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training,
                                  scope='fc1', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training,
                          scope='dropout1')
    net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training,
                                  scope='fc2', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training,
                          scope='dropout2')
    net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')
    return net, end_points