Example #1
def flow_embedding_module(xyz1, xyz2, feat1, feat2, radius, nsample, mlp, is_training, bn_decay, scope, bn=True, pooling='max', knn=True, corr_func='elementwise_product'):
    """
    Input:
        xyz1: (batch_size, npoint, 3)
        xyz2: (batch_size, npoint, 3)
        feat1: (batch_size, npoint, channel)
        feat2: (batch_size, npoint, channel)
    Output:
        xyz1: (batch_size, npoint, 3)
        feat1_new: (batch_size, npoint, mlp[-1])
    """
    if knn:
        _, idx = knn_point(nsample, xyz2, xyz1)
    else:
        idx, cnt = query_ball_point(radius, nsample, xyz2, xyz1)
        _, idx_knn = knn_point(nsample, xyz2, xyz1)
        cnt = tf.tile(tf.expand_dims(cnt, -1), [1,1,nsample])
        idx = tf.where(cnt > (nsample-1), idx, idx_knn)

    xyz2_grouped = group_point(xyz2, idx) # batch_size, npoint, nsample, 3
    xyz1_expanded = tf.expand_dims(xyz1, 2) # batch_size, npoint, 1, 3
    xyz_diff = xyz2_grouped - xyz1_expanded # batch_size, npoint, nsample, 3

    feat2_grouped = group_point(feat2, idx) # batch_size, npoint, nsample, channel
    feat1_expanded = tf.expand_dims(feat1, 2) # batch_size, npoint, 1, channel
    # TODO: change distance function
    if corr_func == 'elementwise_product':
        feat_diff = feat2_grouped * feat1_expanded # batch_size, npoint, nsample, channel
    elif corr_func == 'concat':
        feat_diff = tf.concat(axis=-1, values=[feat2_grouped, tf.tile(feat1_expanded,[1,1,nsample,1])]) # batch_size, npoint, nsample, channel*2
    elif corr_func == 'dot_product':
        feat_diff = tf.reduce_sum(feat2_grouped * feat1_expanded, axis=[-1], keep_dims=True) # batch_size, npoint, nsample, 1
    elif corr_func == 'cosine_dist':
        feat2_grouped = tf.nn.l2_normalize(feat2_grouped, -1)
        feat1_expanded = tf.nn.l2_normalize(feat1_expanded, -1)
        feat_diff = tf.reduce_sum(feat2_grouped * feat1_expanded, axis=[-1], keep_dims=True) # batch_size, npoint, nsample, 1
    elif corr_func == 'flownet_like': # assuming square patch size k = 0, as in the FlowNet paper
        batch_size = xyz1.get_shape()[0].value
        npoint = xyz1.get_shape()[1].value
        feat_diff = tf.reduce_sum(feat2_grouped * feat1_expanded, axis=[-1], keep_dims=True) # batch_size, npoint, nsample, 1
        total_diff = tf.concat(axis=-1, values=[xyz_diff, feat_diff]) # batch_size, npoint, nsample, 4
        feat1_new = tf.reshape(total_diff, [batch_size, npoint, -1]) # batch_size, npoint, nsample*4
        #feat1_new = tf.concat(axis=[-1], values=[feat1_new, feat1]) # batch_size, npoint, nsample*4+channel
        return xyz1, feat1_new


    feat1_new = tf.concat([feat_diff, xyz_diff], axis=3) # batch_size, npoint, nsample, [channel or 1] + 3
    # TODO: move scope to outer indent
    with tf.variable_scope(scope) as sc:
        for i, num_out_channel in enumerate(mlp):
            feat1_new = tf_util.conv2d(feat1_new, num_out_channel, [1,1],
                                       padding='VALID', stride=[1,1],
                                       bn=True, is_training=is_training,
                                       scope='conv_diff_%d'%(i), bn_decay=bn_decay)
    if pooling=='max':
        feat1_new = tf.reduce_max(feat1_new, axis=[2], keep_dims=False, name='maxpool_diff')
    elif pooling=='avg':
        feat1_new = tf.reduce_mean(feat1_new, axis=[2], keep_dims=False, name='avgpool_diff')
    return xyz1, feat1_new
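A minimal usage sketch for the flow embedding module above (not part of the original example). It assumes TF 1.x graph mode and that the compiled FlowNet3D/PointNet++ custom ops (knn_point, query_ball_point, group_point) and tf_util are importable; the placeholder shapes and the scope name are illustrative assumptions.

import tensorflow as tf

# Hypothetical shapes: 8 scenes, 2048 points per frame, 128-d point features.
xyz1 = tf.placeholder(tf.float32, (8, 2048, 3))
xyz2 = tf.placeholder(tf.float32, (8, 2048, 3))
feat1 = tf.placeholder(tf.float32, (8, 2048, 128))
feat2 = tf.placeholder(tf.float32, (8, 2048, 128))
is_training = tf.placeholder(tf.bool, ())

# Correlate each frame-1 point with its 64 nearest neighbours in frame 2.
_, flow_feat = flow_embedding_module(
    xyz1, xyz2, feat1, feat2,
    radius=10.0, nsample=64, mlp=[128, 128, 128],
    is_training=is_training, bn_decay=None,
    scope='flow_embedding', knn=True, corr_func='concat')
# flow_feat: (8, 2048, 128)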
Example #2
    def test(self):
        knn = True
        np.random.seed(100)
        pts = np.random.random((32, 512, 64)).astype("float32")
        tmp1 = np.random.random((32, 512, 3)).astype("float32")
        tmp2 = np.random.random((32, 128, 3)).astype("float32")
        with tf.device("/gpu:0"):
            points = tf.constant(pts)
            xyz1 = tf.constant(tmp1)
            xyz2 = tf.constant(tmp2)
            radius = 0.1
            nsample = 64
            if knn:
                _, idx = knn_point(nsample, xyz1, xyz2)
                grouped_points = group_point(points, idx)
            else:
                idx, _ = query_ball_point(radius, nsample, xyz1, xyz2)
                grouped_points = group_point(points, idx)
                # grouped_points_grad = tf.ones_like(grouped_points)
                # points_grad = tf.gradients(grouped_points, points, grouped_points_grad)
        with tf.compat.v1.Session("") as sess:
            now = time.time()
            for _ in range(100):
                ret = sess.run(grouped_points)
            print(time.time() - now)
            print(ret.shape, ret.dtype)
            print(ret)
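For reference, a pure-NumPy version of the same kNN grouping (a sketch; the brute-force loop is only meant for checking the shapes that knn_point + group_point produce, not for speed):

import numpy as np

def knn_group_np(points, xyz, new_xyz, k):
    # For each query point in new_xyz, gather the features (from points)
    # of its k nearest neighbours in xyz.
    b, m, _ = new_xyz.shape
    grouped = np.empty((b, m, k, points.shape[-1]), dtype=points.dtype)
    for i in range(b):
        d2 = ((new_xyz[i, :, None, :] - xyz[i, None, :, :]) ** 2).sum(-1)  # (m, n)
        idx = np.argsort(d2, axis=-1)[:, :k]                               # (m, k)
        grouped[i] = points[i][idx]                                        # (m, k, c)
    return grouped

np.random.seed(100)
pts = np.random.random((32, 512, 64)).astype("float32")
xyz1 = np.random.random((32, 512, 3)).astype("float32")
xyz2 = np.random.random((32, 128, 3)).astype("float32")
print(knn_group_np(pts, xyz1, xyz2, k=64).shape)  # (32, 128, 64, 64)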
Example #3
def get_uniform_loss(pcd,
                     percentages=[0.004, 0.006, 0.008, 0.010, 0.012],
                     radius=1.0):
    B, N, C = pcd.get_shape().as_list()
    npoint = int(N * 0.05)
    loss = []
    for p in percentages:
        nsample = int(N * p)
        r = math.sqrt(p * radius)
        disk_area = math.pi * (radius**2) * p / nsample
        #print(npoint,nsample)
        new_xyz = gather_point(pcd, farthest_point_sample(
            npoint, pcd))  # (batch_size, npoint, 3)
        idx, pts_cnt = query_ball_point(
            r, nsample, pcd, new_xyz)  #(batch_size, npoint, nsample)

        #expect_len =  tf.sqrt(2*disk_area/1.732)#using hexagon
        expect_len = tf.sqrt(disk_area)  # using square

        grouped_pcd = group_point(pcd, idx)
        grouped_pcd = tf.concat(tf.unstack(grouped_pcd, axis=1), axis=0)

        var, _ = knn_point(2, grouped_pcd, grouped_pcd)
        uniform_dis = -var[:, :, 1:]
        uniform_dis = tf.sqrt(tf.abs(uniform_dis + 1e-8))
        uniform_dis = tf.reduce_mean(uniform_dis, axis=[-1])
        uniform_dis = tf.square(uniform_dis - expect_len) / (expect_len + 1e-8)
        uniform_dis = tf.reshape(uniform_dis, [-1])

        mean, variance = tf.nn.moments(uniform_dis, axes=0)
        mean = mean * math.pow(p * 100, 2)
        #nothing 4
        loss.append(mean)
    return tf.add_n(loss) / len(percentages)
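A hedged usage sketch for the uniformity loss above: in the point-cloud upsampling setups it comes from, it is typically added to a reconstruction term with a small weight. The tensor pred_pcd, the loss cd_loss, and the 0.1 weight below are illustrative assumptions, not part of the original code.

# pred_pcd: (B, N, 3) point cloud produced by the generator
uniform_loss = get_uniform_loss(pred_pcd, radius=1.0)
total_loss = cd_loss + 0.1 * uniform_loss  # weighting is an assumption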
Example #4
def get_model(sample_num, sample_scale, point_cloud, is_training, filter_sizes, filter_num, bn_decay=None):
    '''
    Input: 
        sample_num: int32; sample M points from originally N points.
        sample_scale: list of int; for each scale Ki, group the Ki nearest neighbours of every sampled point
    '''
    batch_size = point_cloud.get_shape()[0].value
    feature_collection = []
    channels = [32, 64, 128]
    M_sampled_points = farthest_point_sample(sample_num, point_cloud)
    # [batch, sample_num, 3]
    new_xyz = gather_point(point_cloud, M_sampled_points)
    for i, scale in enumerate(sample_scale):
        # [batch, sample_num, scale]
        _, idx = knn_point(scale, point_cloud, new_xyz)
        # [batch, sample_num, scale, 3]
        points_features = group_point(point_cloud, idx)
        for j, channel in enumerate(channels):
            # [batch, sample_num, scale, channel]
            points_features = tf_util.conv2d(points_features, channel, [1, 1],
                                             padding = 'VALID', stride = [1, 1],
                                             bn = True, is_training = is_training,
                                             scope='conv_%d_%d'%(i, j), bn_decay = bn_decay,
                                             data_format = 'NHWC')
        # [batch, sample_num, 1, 128]
        points_features = tf.reduce_max(points_features, axis = [2], keep_dims = True, name = 'maxpool')
        # [batch, sample_num, 128]
        points_features = tf.squeeze(points_features, [2])
        # [batch, sample_num, 1, 128]
        points_features = tf.expand_dims(points_features, 2)
        # [batch * sample_num, 1, 128]
        points_features = tf.reshape(points_features, [batch_size * sample_num, 1, channels[-1]])
        feature_collection.append(points_features)

    # [batch * sample_num, len(sample_scale), 128]
    textcnn_embedding = tf.concat(feature_collection, 1)
    # [batch * sample_num, feature_size = 128]
    textcnn_encoded = get_textcnn_model(textcnn_embedding, filter_sizes, filter_num, is_training, bn_decay)
    # [batch, sample_num, feature_size]
    textcnn_encoded = tf.reshape(textcnn_encoded, [batch_size, sample_num, -1])
    # [batch, sample_num, 1, feature_size]
    global_feature = tf.expand_dims(textcnn_encoded, 2)
    channels = [256, 512, 1024]
    for i, channel in enumerate(channels):
        # [batch, sample_num, 1, channel]
        global_feature = tf_util.conv2d(global_feature, channel, [1, 1],
                                        padding = 'VALID', stride = [1, 1],
                                        bn = True, is_training = is_training,
                                        scope='feature_aggregation_conv_%d'%(i), bn_decay = bn_decay,
                                        data_format = 'NHWC')
    # [batch, 1, 1, 1024]
    global_feature = tf.reduce_max(global_feature, axis = [1], keep_dims = True, name = 'global_feature_maxpool')
    # [batch, 1024]
    global_feature = tf.reshape(global_feature, [batch_size, -1])
    classify_feature = tf_util.fully_connected(global_feature, 512, bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
    classify_feature = tf_util.dropout(classify_feature, keep_prob=0.4, is_training=is_training, scope='dp1')
    classify_feature = tf_util.fully_connected(classify_feature, 256, bn=True, is_training=is_training, scope='fc2', bn_decay=bn_decay)
    classify_feature = tf_util.dropout(classify_feature, keep_prob=0.4, is_training=is_training, scope='dp2')
    classify_feature = tf_util.fully_connected(classify_feature, 40, activation_fn=None, scope='fc3')
    return classify_feature
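A minimal graph-construction sketch for the classifier above, assuming a 40-class ModelNet-style setup and that the repo's helpers (farthest_point_sample, knn_point, group_point, tf_util, get_textcnn_model) are importable. The placeholder shape, sample scales, and filter settings are illustrative assumptions.

point_cloud = tf.placeholder(tf.float32, (16, 1024, 3))
is_training = tf.placeholder(tf.bool, ())

logits = get_model(sample_num=512,
                   sample_scale=[16, 32, 64],   # three neighbourhood sizes
                   point_cloud=point_cloud,
                   is_training=is_training,
                   filter_sizes=[1, 2, 3],      # assumed text-CNN filter widths
                   filter_num=128,
                   bn_decay=None)
# logits: (16, 40) class scores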
Example #5
def sample_and_group(npoint, radius, nsample, xyz, points, knn=False, use_xyz=True):
    '''
    Input:
        npoint: int32
        radius: float32
        nsample: int32
        xyz: (batch_size, ndataset, 3) TF tensor
        points: (batch_size, ndataset, channel) TF tensor, if None will just use xyz as points
        knn: bool, if True use kNN instead of radius search
        use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
    Output:
        new_xyz: (batch_size, npoint, 3) TF tensor
        new_points: (batch_size, npoint, nsample, 3+channel) TF tensor
        idx: (batch_size, npoint, nsample) TF tensor, indices of local points as in ndataset points
        grouped_xyz: (batch_size, npoint, nsample, 3) TF tensor, normalized point XYZs
            (subtracted by seed point XYZ) in local regions
    '''

    new_xyz = gather_point(xyz, farthest_point_sample(npoint, xyz)) # (batch_size, npoint, 3)
    if knn:
        _,idx = knn_point(nsample, xyz, new_xyz)
    else:
        idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)
    grouped_xyz = group_point(xyz, idx) # (batch_size, npoint, nsample, 3)
    grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2), [1,1,nsample,1]) # translation normalization
    if points is not None:
        grouped_points = group_point(points, idx) # (batch_size, npoint, nsample, channel)
        if use_xyz:
            new_points = tf.concat([grouped_xyz, grouped_points], axis=-1) # (batch_size, npoint, nsample, 3+channel)
        else:
            new_points = grouped_points
    else:
        new_points = grouped_xyz

    return new_xyz, new_points, idx, grouped_xyz
Example #6
def sample_and_group(npoint, radius, nsample, xyz, points, knn=False, use_xyz=True):
    ''' New sample_and_group with Fully Delayed-Aggregation
    Input:
        npoint: int32
        radius: float32
        nsample: int32
        xyz: (batch_size, ndataset, 3) TF tensor
        points: (batch_size, ndataset, channel) TF tensor, if None will just use xyz as points
        knn: bool, if True use kNN instead of radius search
        use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
    Output:
        new_xyz: (batch_size, npoint, 3) TF tensor
        new_points: (batch_size, npoint, nsample, 3+channel) TF tensor
        idx: (batch_size, npoint, nsample) TF tensor, indices of local points as in ndataset points
        grouped_xyz: (batch_size, npoint, nsample, 3) TF tensor, normalized point XYZs
            (subtracted by seed point XYZ) in local regions
    '''
    point_cloud_shape = points.get_shape()
    batch_size = point_cloud_shape[0].value
    num_points = point_cloud_shape[1].value
    num_dims = point_cloud_shape[-1].value
    
    # get the index and coordinates of sampled points
    sampled_idx = tf.random_uniform(shape=(batch_size, npoint),maxval=npoint-1,dtype=tf.int32)
    new_xyz = gather_point(xyz, sampled_idx) # (batch_size, npoint, 3)
    
    if knn:
        _,idx = knn_point(nsample, xyz, new_xyz)
    else:
        idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)

    # grouping:
    idx_ = tf.range(batch_size) * num_points
    idx_ = tf.reshape(idx_, [batch_size, 1, 1])

    points = tf.reshape(points, [-1, num_dims])
    new_points = tf.gather(points, idx + idx_)
    
    # get the sampled points as centroids with xyz+feature for coord correction
    sampled_idx = tf.expand_dims(sampled_idx, -1)
    sampled_points = tf.gather(points, sampled_idx + idx_)

    # coord correction
    new_points -= sampled_points

    # get the new xyz set for sampled points and neighbors
    xyz_shape = xyz.get_shape()
    batch_size = xyz_shape[0].value
    num_points = xyz_shape[1].value
    num_dims = xyz_shape[-1].value

    idx_ = tf.range(batch_size) * num_points
    idx_ = tf.reshape(idx_, [batch_size, 1, 1])
    
    xyz_reshaped = tf.reshape(xyz, [-1, num_dims])
    grouped_xyz = tf.gather(xyz_reshaped, idx + idx_)
  
    grouped_xyz -= tf.expand_dims(new_xyz, 2) # translation normalization

    return new_xyz, new_points, idx, grouped_xyz
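The delayed-aggregation variant above replaces group_point with a flattened tf.gather: batch b's rows start at offset b * num_points in the flattened feature tensor, so adding that offset to the per-batch neighbour indices picks rows from the correct batch. A small NumPy sketch of the same indexing trick:

import numpy as np

batch_size, num_points, num_dims = 2, 5, 4
points = np.arange(batch_size * num_points * num_dims,
                   dtype=np.float32).reshape(batch_size, num_points, num_dims)
idx = np.array([[[0, 3], [4, 1]],
                [[2, 2], [0, 4]]])                  # (batch, npoint, nsample)

offset = (np.arange(batch_size) * num_points).reshape(batch_size, 1, 1)
flat = points.reshape(-1, num_dims)                 # (batch*num_points, num_dims)
grouped = flat[idx + offset]                        # (batch, npoint, nsample, num_dims)

# Same element as indexing each batch separately:
assert np.array_equal(grouped[1, 0, 1], points[1, 2])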
Example #7
def pc_sampling(xyz, feat, nsample, num_point, scope='sampling'):
    """ Fully connected layer with non-linear operation.
  
  Args:
    xyz: 3-D tensor B x N x 3
    nsample: k
    num_point: N2
    feat: 3-D tensor B x N x C
  
  Returns:
    feat_sample: 3-D tensor B x N2 x C
    xyz_new: 3-D tensor B x N2 x 3
  """
    with tf.variable_scope(scope) as sc:
        xyz_new = gather_point(xyz, farthest_point_sample(num_point, xyz))
        _, idx_pooling = knn_point(nsample, xyz, xyz_new)

        grouped_points = group_point(feat, idx_pooling)
        feat_sample = tf.nn.max_pool(grouped_points, [1, 1, nsample, 1],
                                     [1, 1, 1, 1],
                                     padding='VALID',
                                     data_format='NHWC',
                                     name="MAX_POOLING")
        feat_sample = tf.squeeze(feat_sample, axis=[2])

        return feat_sample, xyz_new
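A hedged call sketch for the pooling helper above: it farthest-point-samples num_point centres and max-pools the features of each centre's nsample nearest neighbours. Tensor names and sizes are assumptions.

# xyz: (B, 1024, 3), feat: (B, 1024, 64)
feat_down, xyz_down = pc_sampling(xyz, feat, nsample=16, num_point=256,
                                  scope='pool_1')
# feat_down: (B, 256, 64), xyz_down: (B, 256, 3)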
Example #8
def sample_and_group_ByKeypoints(keypoints_xyz, nsample, xyz, sn, is_training):
    '''
    Input:
        keypoints_xyz:BxMx3
        nsample: int32
        xyz: bxNx3 
    Return:
        grouped_augmented:BxMxnsamplex3
        idx:BxMxnsample int 
        grouped_xyz:#BxMxnsamplex3
        grouped_xyz_center:BxMx3
    '''

    _, idx = knn_point(nsample, xyz, keypoints_xyz)
    grouped_xyz = group_point(xyz, idx)  #BxMxnsamplex3
    grouped_xyz_center = tf.reduce_mean(grouped_xyz, axis=2)  #BxMx3
    grouped_xyz_decentered = grouped_xyz - tf.tile(
        tf.expand_dims(grouped_xyz_center, 2), [1, 1, nsample, 1])
    if sn is not None:
        grouped_sn = group_point(sn, idx)  #BxMxnsamplex3
        grouped_augmented = tf.concat([grouped_xyz_decentered, grouped_sn],
                                      -1)  #BxMxnsamplex6
    else:
        grouped_augmented = grouped_xyz_decentered  #BxMxnsamplex3

    return grouped_augmented, idx, grouped_xyz, grouped_xyz_center
Example #9
def sample_and_group(npoint, radius, nsample, xyz, points, knn=False, use_xyz=True):
    '''
    Input:
        npoint: int32
        radius: float32
        nsample: int32
        xyz: (batch_size, ndataset, 3) TF tensor
        points: (batch_size, ndataset, channel) TF tensor, if None will just use xyz as points
        knn: bool, if True use kNN instead of radius search
        use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
    Output:
        new_xyz: (batch_size, npoint, 3) TF tensor
        new_points: (batch_size, npoint, nsample, 3+channel) TF tensor
        idx: (batch_size, npoint, nsample) TF tensor, indices of local points as in ndataset points
        grouped_xyz: (batch_size, npoint, nsample, 3) TF tensor, normalized point XYZs
            (subtracted by seed point XYZ) in local regions
    '''

    new_xyz = gather_point(xyz, farthest_point_sample(npoint, xyz)) # (batch_size, npoint, 3)
    if knn:
        _,idx = knn_point(nsample, xyz, new_xyz)
    else:
        idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)
    grouped_xyz = group_point(xyz, idx) # (batch_size, npoint, nsample, 3)
    grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2), [1,1,nsample,1]) # translation normalization
    if points is not None:
        grouped_points = group_point(points, idx) # (batch_size, npoint, nsample, channel)
        if use_xyz:
            new_points = tf.concat([grouped_xyz, grouped_points], axis=-1) # (batch_size, npoint, nsample, 3+channel)
        else:
            new_points = grouped_points
    else:
        new_points = grouped_xyz

    return new_xyz, new_points, idx, grouped_xyz
Example #10
def sample_and_group_layer1(npoint,
                            radius,
                            nsample,
                            xyz,
                            points,
                            knn=False,
                            use_xyz=True):
    '''
    Input:
        npoint: int32
        radius: float32
        nsample: int32
        xyz: (batch_size, ndataset, 3) TF tensor
        points: (batch_size, ndataset, channel) TF tensor, if None will just use xyz as points
        knn: bool, if True use kNN instead of radius search
        use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
    Output:
        new_xyz: (batch_size, npoint, 3) TF tensor
        new_points: (batch_size, npoint, nsample, 3+channel) TF tensor
        idx: (batch_size, npoint, nsample) TF tensor, indices of local points as in ndataset points
        grouped_xyz: (batch_size, npoint, nsample, 3) TF tensor, normalized point XYZs
            (subtracted by seed point XYZ) in local regions
    '''
    #tf_ops/samples/tf_sampling.py
    new_xyz = gather_point(xyz, farthest_point_sample(
        npoint, xyz))  # (batch_size, npoint, 3): pick the npoint farthest-sampled points (e.g. 512)
    if knn:
        _, idx = knn_point(nsample, xyz, new_xyz)
    else:
        idx, pts_cnt = query_ball_point(radius, nsample, xyz,
                                        new_xyz)  # indices of the nsample (e.g. 32) neighbours for each of the npoint (e.g. 512) centroids
    grouped_xyz = group_point(
        xyz, idx)  # (batch_size, npoint, nsample, 3): gather each centroid's neighbour coordinates
    grouped_xyz -= tf.tile(
        tf.expand_dims(new_xyz, 2), [1, 1, nsample, 1]
    )  # translation normalization: expand (bs, npoint, 3) -> (bs, npoint, 1, 3), tile to (bs, npoint, nsample, 3), then subtract the centroid
    kernel = tf.Variable(tf.random_normal([32, 16, 3], stddev=0.1, seed=1),
                         name='kernel')
    tf.add_to_collection("kernel", kernel)
    # kernel = tf.convert_to_tensor(kernel)
    kc_points = kernel_correlation(
        grouped_xyz, kernel,
        0.005)  # KC module ==>(b,l,n)===>(BS, npoint, 1, l)
    kc_points = tf.transpose(kc_points, perm=[0, 2, 1])
    kc_points = tf.tile(tf.expand_dims(kc_points, 2), [1, 1, nsample, 1])
    if points is not None:
        grouped_points = group_point(
            points, idx)  # (batch_size, npoint, nsample, channel)
        if use_xyz:  # whether to keep the raw xyz coordinates as part of the features
            new_points = tf.concat(
                [grouped_xyz, grouped_points],
                axis=-1)  # (batch_size, npoint, nsample, 3+channel)
        else:
            new_points = grouped_points
    else:
        new_points = grouped_xyz
    new_points = tf.concat([kc_points, new_points], axis=-1)

    return new_xyz, new_points, idx, grouped_xyz
Example #11
def set_upconv_module(xyz1, xyz2, feat1, feat2, nsample, mlp, mlp2, is_training,
                      scope, bn_decay=None, bn=True, pooling='max', radius=None,
                      knn=True):
    """
        Feature propagation from xyz2 (less points) to xyz1 (more points)

    Inputs:
        xyz1: (batch_size, npoint1, 3)
        xyz2: (batch_size, npoint2, 3)
        feat1: (batch_size, npoint1, channel1) features for xyz1 points (earlier layers)
        feat2: (batch_size, npoint2, channel2) features for xyz2 points
    Output:
        feat1_new: (batch_size, npoint1, mlp[-1] or mlp2[-1] or channel1+3)

        TODO: Add support for skip links. Study how delta(XYZ) plays a role in feature updating.
    """
    with tf.variable_scope(scope) as sc:
        if knn:
            l2_dist, idx = knn_point(nsample, xyz2, xyz1)
        else:
            idx, pts_cnt = query_ball_point(radius, nsample, xyz2, xyz1)

        xyz2_grouped = group_point(xyz2, idx)  # batch_size, npoint1, nsample, 3
        xyz1_expanded = tf.expand_dims(xyz1, 2)  # batch_size, npoint1, 1, 3
        xyz_diff = xyz2_grouped - xyz1_expanded  # batch_size, npoint1, nsample, 3

        feat2_grouped = group_point(feat2,
                                    idx)  # batch_size, npoint1, nsample, channel2
        net = tf.concat([feat2_grouped, xyz_diff],
                        axis=3)  # batch_size, npoint1, nsample, channel2+3

        if mlp is None: mlp = []
        for i, num_out_channel in enumerate(mlp):
            net = tf_util.conv2d(net, num_out_channel, [1, 1],
                                 padding='VALID', stride=[1, 1],
                                 bn=True, is_training=is_training,
                                 scope='conv%d' % (i), bn_decay=bn_decay)
        if pooling == 'max':
            feat1_new = tf.reduce_max(net, axis=[2], keep_dims=False,
                                      name='maxpool')  # batch_size, npoint1, mlp[-1]
        elif pooling == 'avg':
            feat1_new = tf.reduce_mean(net, axis=[2], keep_dims=False,
                                       name='avgpool')  # batch_size, npoint1, mlp[-1]

        if feat1 is not None:
            feat1_new = tf.concat([feat1_new, feat1],
                                  axis=2)  # batch_size, npoint1, mlp[-1]+channel1

        feat1_new = tf.expand_dims(feat1_new,
                                   2)  # batch_size, npoint1, 1, mlp[-1]+channel1
        if mlp2 is None: mlp2 = []
        for i, num_out_channel in enumerate(mlp2):
            feat1_new = tf_util.conv2d(feat1_new, num_out_channel, [1, 1],
                                       padding='VALID', stride=[1, 1],
                                       bn=True, is_training=is_training,
                                       scope='post-conv%d' % (i),
                                       bn_decay=bn_decay)
        feat1_new = tf.squeeze(feat1_new, [2])  # batch_size, npoint1, mlp2[-1]
        return feat1_new
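A hedged wiring sketch for the up-convolution module above: it propagates features from a coarse layer back to a denser one. The tensor names and channel sizes below are illustrative assumptions.

# l1_xyz: (B, 1024, 3) with l1_feat: (B, 1024, 64)   -- denser layer
# l2_xyz: (B, 256, 3)  with l2_feat: (B, 256, 128)   -- coarser layer
up_feat = set_upconv_module(l1_xyz, l2_xyz, l1_feat, l2_feat,
                            nsample=8, mlp=[128, 128], mlp2=[128],
                            is_training=is_training, scope='up_sa_layer1',
                            bn_decay=bn_decay, knn=True)
# up_feat: (B, 1024, 128)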
Example #12
def sample_and_group(npoint,
                     radius,
                     nsample,
                     xyz,
                     points,
                     tnet_spec=None,
                     knn=False,
                     use_xyz=True,
                     centralize_points=False):
    '''
    Input:
        npoint: int32
        radius: float32
        nsample: int32
        xyz: (batch_size, ndataset, 3) TF tensor
        points: (batch_size, ndataset, channel) TF tensor, if None will just use xyz as points
        tnet_spec: dict (keys: mlp, mlp2, is_training, bn_decay), if None do not apply tnet
        knn: bool, if True use kNN instead of radius search
        use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
    Output:
        new_xyz: (batch_size, npoint, 3) TF tensor
        new_points: (batch_size, npoint, nsample, 3+channel) TF tensor
        idx: (batch_size, npoint, nsample) TF tensor, indices of local points as in ndataset points
        grouped_xyz: (batch_size, npoint, nsample, 3) TF tensor, normalized point XYZs
            (subtracted by seed point XYZ) in local regions
    '''

    fpsidx = farthest_point_sample(npoint, xyz)
    new_xyz = gather_point(xyz, fpsidx)  # (batch_size, npoint, 3)
    if knn:
        _, idx = knn_point(nsample, xyz, new_xyz)
    else:
        idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)
    grouped_xyz = group_point(xyz, idx)  # (batch_size, npoint, nsample, 3)
    grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2),
                           [1, 1, nsample, 1])  # translation normalization
    if tnet_spec is not None:
        grouped_xyz = tnet(grouped_xyz, tnet_spec)
    if points is not None:
        grouped_points = group_point(
            points, idx)  # (batch_size, npoint, nsample, channel)

        if centralize_points:
            central_points = gather_point(points[:, :, :3], fpsidx)
            grouped_points = tf.concat((grouped_points[:, :, :, :3] - tf.tile(
                tf.expand_dims(central_points, 2), [1, 1, nsample, 1]),
                                        grouped_points[:, :, :, 3:]), -1)

        if use_xyz:
            new_points = tf.concat(
                [grouped_xyz, grouped_points],
                axis=-1)  # (batch_size, npoint, nsample, 3+channel)
        else:
            new_points = grouped_points
    else:
        new_points = grouped_xyz

    return new_xyz, new_points, idx, grouped_xyz
예제 #13
0
def get_uniform_loss_knn(pred):
    var, _ = knn_point(6, pred, pred)
    mean = tf.reduce_mean(var, axis=2)
    _, variance = tf.nn.moments(mean, axes=[1])
    variance1 = tf.reduce_sum(variance)
    _, var = tf.nn.moments(var, axes=[2])
    var = tf.reduce_sum(var)
    variance2 = tf.reduce_sum(var)
    return variance1 + variance2
예제 #14
0
def sample_and_group(npoint,
                     radius,
                     nsample,
                     xyz,
                     points,
                     tnet_spec=None,
                     knn=False,
                     use_xyz=True):
    '''
    Input:
        npoint: int32
        radius: float32
        nsample: int32
        xyz: (batch_size, ndataset, 3) TF tensor
        points: (batch_size, ndataset, channel) TF tensor, if None will just use xyz as points
        tnet_spec: dict (keys: mlp, mlp2, is_training, bn_decay), if None do not apply tnet
        knn: bool, if True use kNN instead of radius search
        use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
    Output:
        new_xyz: (batch_size, npoint, 3) TF tensor
        new_points: (batch_size, npoint, nsample, 3+channel) TF tensor
        idx: (batch_size, npoint, nsample) TF tensor, indices of local points as in ndataset points
        grouped_xyz: (batch_size, npoint, nsample, 3) TF tensor, normalized point XYZs
            (subtracted by seed point XYZ) in local regions
    Workflow:
        Find the <npoint> down-sampled farthest points by <farthest_point_sample>
        For each down-sampled point, find <nsample> sub-group points by <query_ball_point>
    '''

    new_xyz = gather_point(
        xyz, farthest_point_sample(npoint, xyz)
    )  # (batch_size, npoint, 3)  the farthest-sampled points
    if knn:
        _, idx = knn_point(nsample, xyz, new_xyz)
    else:
        # [batch_size,npoint,nsample] [batch_size,npoint]
        idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)
    grouped_xyz = group_point(xyz, idx)  # (batch_size, npoint, nsample, 3)
    grouped_xyz -= tf.tile(tf.expand_dims(
        new_xyz, 2), [1, 1, nsample, 1
                      ])  # translation normalization: minus the center point
    if tnet_spec is not None:
        grouped_xyz = tnet(grouped_xyz, tnet_spec)
    if points is not None:
        grouped_points = group_point(
            points, idx)  # (batch_size, npoint, nsample, channel)
        if use_xyz:
            new_points = tf.concat(
                [grouped_xyz, grouped_points],
                axis=-1)  # (batch_size, npoint, nsample, 3+channel)
        else:
            new_points = grouped_points
    else:
        new_points = grouped_xyz

    return new_xyz, new_points, idx, grouped_xyz
Example #15
def pointnet_sa_module_msg(xyz, points, npoint, radius_list, nsample_list, mlp_list, is_training, bn_decay, scope, bn=True, knn=True, use_xyz=True, use_nchw=False, rearrange=False):
    ''' PointNet Set Abstraction (SA) module with Multi-Scale Grouping (MSG)
        Input:
            xyz: (batch_size, ndataset, 3) TF tensor
            points: (batch_size, ndataset, channel) TF tensor
            npoint: int32 -- #points sampled in farthest point sampling
            radius: list of float32 -- search radius in local region
            nsample: list of int32 -- how many points in each local region
            mlp: list of list of int32 -- output size for MLP on each point
            use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
            use_nchw: bool, if True, use NCHW data format for conv2d, which is usually faster than NHWC format
        Return:
            new_xyz: (batch_size, npoint, 3) TF tensor
            new_points: (batch_size, npoint, \sum_k{mlp[k][-1]}) TF tensor
    '''
    batch_size = xyz.get_shape()[0].value
    data_format = 'NCHW' if use_nchw else 'NHWC'
    with tf.variable_scope(scope) as sc:
        new_xyz = gather_point(xyz, farthest_point_sample(npoint, xyz))
        new_points_list = []
        for i in range(len(nsample_list)):
            radius = radius_list[i]
            nsample = nsample_list[i]

            if knn:
                _,idx = knn_point(nsample, xyz, new_xyz)
            else:
                idx, _ = query_ball_point(radius, nsample, xyz, new_xyz)

            #idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)
            grouped_xyz = group_point(xyz, idx)
            grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2), [1,1,nsample,1])
            if points is not None:
                grouped_points = group_point(points, idx)
                if use_xyz:
                    grouped_points = tf.concat([grouped_points, grouped_xyz], axis=-1)
            else:
                grouped_points = grouped_xyz
            if use_nchw: grouped_points = tf.transpose(grouped_points, [0,3,1,2])
            for j,num_out_channel in enumerate(mlp_list[i]):
                grouped_points = tf_util.conv2d(grouped_points, num_out_channel, [1,1],
                                                padding='VALID', stride=[1,1], bn=bn, is_training=is_training,
                                                scope='conv%d_%d'%(i,j), bn_decay=bn_decay, data_format=data_format)
            if use_nchw: grouped_points = tf.transpose(grouped_points, [0,2,3,1])
            new_points = tf.reduce_max(grouped_points, axis=[2])
            if rearrange:
                new_points = tf.expand_dims(new_points, -1)
            new_points_list.append(new_points)
        new_points_concat = tf.concat(new_points_list, axis=-1)
        new_points_concat = tf.reshape(new_points_concat, [batch_size, npoint, -1])
        return new_xyz, new_points_concat
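A hedged call sketch for the MSG set-abstraction layer above. With knn=True the radius list is ignored and only nsample_list matters; the radii, group sizes, and MLP widths below follow the usual PointNet++ configuration but are assumptions here.

l1_xyz, l1_points = pointnet_sa_module_msg(
    xyz, points, npoint=512,
    radius_list=[0.1, 0.2, 0.4],
    nsample_list=[16, 32, 64],
    mlp_list=[[32, 32, 64], [64, 64, 128], [64, 96, 128]],
    is_training=is_training, bn_decay=bn_decay,
    scope='sa_layer1', knn=False)
# l1_xyz:    (B, 512, 3)
# l1_points: (B, 512, 64 + 128 + 128)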
Example #16
def sample_and_group(npoint,
                     radius,
                     nsample,
                     xyz,
                     points,
                     knn=False,
                     use_xyz=True):
    '''
    Input:
        npoint: int32
        radius: float32
        nsample: int32
        xyz: (batch_size, ndataset, 3) TF tensor
        points: (batch_size, ndataset, channel) TF tensor, if None will just use xyz as points
        knn: bool, if True use kNN instead of radius search
        use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
    Output:
        new_xyz: (batch_size, npoint, 3) TF tensor
        new_points: (batch_size, npoint, nsample, 3+channel) TF tensor
        idx: (batch_size, npoint, nsample) TF tensor, indices of local points as in ndataset points
        grouped_xyz: (batch_size, npoint, nsample, 3) TF tensor, normalized point XYZs
            (subtracted by seed point XYZ) in local regions
        sampled_idx: () TF tensor, idx for sampled points
    '''

    point_cloud_shape = points.get_shape()
    batch_size = point_cloud_shape[0].value
    sampled_idx = farthest_point_sample(npoint, xyz)
    # sampled_idx = tf.random_uniform(shape=(batch_size,npoint),maxval=npoint-1,dtype=tf.int32)

    new_xyz = gather_point(xyz, sampled_idx)  # (batch_size, npoint, 3)
    if knn:
        _, idx = knn_point(nsample, xyz, new_xyz)
    else:
        idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)
    grouped_xyz = group_point(xyz, idx)  # (batch_size, npoint, nsample, 3)
    grouped_xyz -= tf.expand_dims(new_xyz, 2)  # translation normalization
    if points is not None:
        # grouped_points = group_point(points, idx) # (batch_size, npoint, nsample, channel)
        # print("grouped_points:", grouped_points.shape)
        # grouping:
        grouped_points = new_group_point(
            points, idx)  # (batch_size, npoint, nsample, channel)

        print("grouped_points:", grouped_points.shape)
        new_points = grouped_points
    else:
        new_points = grouped_xyz

    print("[Group] points:", new_points.shape)
    return new_xyz, new_points, idx, grouped_xyz
Example #17
def get_interpolated_rgb(xyz, point_cloud):

    _, idx = knn_point(3, point_cloud[:, :, :3], xyz)
    grouped_xyz_rgb = group_point(point_cloud, idx) # (batch size, 2048, 3, 6)

    dist = ((tf.expand_dims(xyz, 2) - grouped_xyz_rgb[:, :, :, :3])**2)
    dist = tf.maximum(dist, 1e-10)

    norm = tf.reduce_sum((1/dist), axis=2) # (batch, 2048, 3)

    weighted_rgb = (1/dist)*grouped_xyz_rgb[:, :, :, 3:]
    weighted_norm_rgb = tf.reduce_sum(weighted_rgb, 2)/norm

    return weighted_norm_rgb, dist, grouped_xyz_rgb
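Note that the weighting above keeps the per-axis squared differences rather than a summed Euclidean distance, and normalises each colour channel accordingly. A pure-NumPy mirror of that step (a sketch for checking shapes and behaviour):

import numpy as np

def interpolate_rgb_np(xyz, neigh_xyz, neigh_rgb):
    # xyz: (B, M, 3); neigh_xyz, neigh_rgb: (B, M, 3, 3) -- 3 neighbours each.
    # Weights are per-axis inverse squared differences, as in the TF code above.
    dist = np.maximum((xyz[:, :, None, :] - neigh_xyz) ** 2, 1e-10)
    w = 1.0 / dist                                       # (B, M, 3, 3)
    return (w * neigh_rgb).sum(axis=2) / w.sum(axis=2)   # (B, M, 3)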
Example #18
def sample_and_group(npoint,
                     radius,
                     nsample,
                     xyz,
                     points,
                     knn=False,
                     use_xyz=True):
    '''
    Input:
        npoint: int32
        radius: float32
        nsample: int32
        xyz: (batch_size, ndataset, 3) TF tensor
        points: (batch_size, ndataset, channel) TF tensor, if None will just use xyz as points
        knn: bool, if True use kNN instead of radius search
        use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
    Output:
        new_xyz: (batch_size, npoint, 3) TF tensor
        new_points: (batch_size, npoint, nsample, 3+channel) TF tensor
        idx: (batch_size, npoint, nsample) TF tensor, indices of local points as in ndataset points
        grouped_xyz: (batch_size, npoint, nsample, 3) TF tensor, normalized point XYZs
            (subtracted by seed point XYZ) in local regions
    '''
    #tf_ops/samples/tf_sampling.py
    new_xyz = gather_point(xyz, farthest_point_sample(
        npoint, xyz))  # (batch_size, npoint, 3): pick the npoint farthest-sampled points (e.g. 512)
    if knn:
        _, idx = knn_point(nsample, xyz, new_xyz)
    else:
        idx, pts_cnt = query_ball_point(radius, nsample, xyz,
                                        new_xyz)  # indices of the nsample (e.g. 32) neighbours for each of the npoint (e.g. 512) centroids
    grouped_xyz = group_point(
        xyz, idx)  # (batch_size, npoint, nsample, 3): gather each centroid's neighbour coordinates
    grouped_xyz -= tf.tile(
        tf.expand_dims(new_xyz, 2), [1, 1, nsample, 1]
    )  # translation normalization: expand (bs, npoint, 3) -> (bs, npoint, 1, 3), tile to (bs, npoint, nsample, 3), then subtract the centroid
    if points is not None:
        grouped_points = group_point(
            points, idx)  # (batch_size, npoint, nsample, channel)
        if use_xyz:  # whether to keep the raw xyz coordinates as part of the features
            new_points = tf.concat(
                [grouped_xyz, grouped_points],
                axis=-1)  # (batch_size, npoint, nsample, 3+channel)
        else:
            new_points = grouped_points
    else:
        new_points = grouped_xyz

    return new_xyz, new_points, idx, grouped_xyz
Example #19
def sample_and_group(npoint, radius, nsample, xyz, points, tnet_spec=None, knn=False, use_xyz=True):
    '''
    Input:
        npoint: int32
        radius: float32
        nsample: int32
        xyz: (batch_size, ndataset, 3) TF tensor
        points: (batch_size, ndataset, channel) TF tensor, if None will just use xyz as points
        tnet_spec: dict (keys: mlp, mlp2, is_training, bn_decay), if None do not apply tnet
        knn: bool, if True use kNN instead of radius search
        use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
    Output:
        new_xyz: (batch_size, npoint, 3) TF tensor
        new_points: (batch_size, npoint, nsample, 3+channel) TF tensor
        idx: (batch_size, npoint, nsample) TF tensor, indices of local points as in ndataset points
        grouped_xyz: (batch_size, npoint, nsample, 3) TF tensor, normalized point XYZs
            (subtracted by seed point XYZ) in local regions
    '''

    new_xyz = gather_point(xyz, farthest_point_sample(npoint, xyz)) # (batch_size, npoint, 3)
    if knn:
        _,idx = knn_point(nsample, xyz, new_xyz)
    else:
        if np.isscalar(radius):
            idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)
        else:
            idx_list = []
            for radius_one, xyz_one, new_xyz_one in zip(tf.unstack(radius,axis=0), tf.unstack(xyz, axis=0),tf.unstack(new_xyz, axis=0)):
                idx_one, _ = query_ball_point(radius_one, nsample, tf.expand_dims(xyz_one, axis=0), tf.expand_dims(new_xyz_one, axis=0))
                idx_list.append(idx_one)
            idx = tf.stack(idx_list, axis=0)
            idx = tf.squeeze(idx, axis=1)

    grouped_xyz = group_point(xyz, idx) # (batch_size, npoint, nsample, 3)
    grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2), [1,1,nsample,1]) # translation normalization
    if tnet_spec is not None:
        grouped_xyz = tnet(grouped_xyz, tnet_spec)
    if points is not None:
        grouped_points = group_point(points, idx) # (batch_size, npoint, nsample, channel)
        if use_xyz:
            # new_points = tf.concat([grouped_xyz, tf.tile(tf.expand_dims(new_xyz, 2), [1,1,nsample,1]),grouped_points], axis=-1) # (batch_size, npoint, nample, 3+channel)
            new_points = tf.concat([grouped_xyz, grouped_points],axis=-1)  # (batch_size, npoint, nsample, 3+channel)
        else:
            new_points = grouped_points
    else:
        # new_points =  tf.concat([grouped_xyz, tf.tile(tf.expand_dims(new_xyz, 2), [1,1,nsample,1])], axis=-1)
        new_points = grouped_xyz

    return new_xyz, new_points, idx, grouped_xyz
Example #20
def sample_and_group(npoint, radius, nsample, xyz, points, knn=False,
                     use_xyz=True):
    '''
    Input:
        npoint: int32 = 1024
        radius: float32 = 0.5,1,2,4
        nsample: int32 = 16
        xyz: (batch_size, ndataset, 3) TF tensor
        points: (batch_size, ndataset, channel) TF tensor, if None will just use xyz as points
        knn: bool, if True use kNN instead of radius search
        use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
    Output:
        new_xyz: (batch_size, npoint, 3) TF tensor
        new_points: (batch_size, npoint, nsample, 3+channel) TF tensor
        idx: (batch_size, npoint, nsample) TF tensor, indices of local points as in ndataset points
        grouped_xyz: (batch_size, npoint, nsample, 3) TF tensor, normalized point XYZs
            (subtracted by seed point XYZ) in local regions
    '''
    new_xyz = gather_point(xyz, farthest_point_sample(npoint,
                                                      xyz))  ### Sampling using farthest point sampling
    # import ipdb; ipdb.set_trace()
    print ('check for seg fault')

    # xyz.shape
    # TensorShape([Dimension(4), Dimension(2048), Dimension(3)])
    # new_xyz.shape
    # TensorShape([Dimension(4), Dimension(1024), Dimension(3)])
    if knn:
        _, idx = knn_point(nsample, xyz, new_xyz)

    else:
        idx, pts_cnt = query_ball_point(radius, nsample, xyz,
                                        new_xyz)  ### Grouping using ball query
    grouped_xyz = group_point(xyz,
                              idx)  # (batch_size, npoint, nsample, 3)  ### Resulting grouped coordinates
    grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2),
                           [1, 1, nsample, 1])  ### translation normalization
    if points is not None:
        grouped_points = group_point(points,
                                     idx)  # (batch_size, npoint, nsample, channel)   ### Resulting grouped features
        if use_xyz:
            new_points = tf.concat([grouped_xyz, grouped_points],
                                   axis=-1)  # (batch_size, npoint, nsample, 3+channel)
        else:
            new_points = grouped_points
    else:
        new_points = grouped_xyz

    return new_xyz, new_points, idx, grouped_xyz
Example #21
def softmax_embedding(xyz1, xyz2, feat1, feat2,
                      radius, nsample,
                      mlp,
                      is_training, bn_decay, scope,
                      bn=True, knn=True, corr_func='concat'):

    if knn:
        _, idx = knn_point(nsample, xyz2, xyz1)
    else:
        idx, _ = query_ball_point(radius, nsample, xyz2, xyz1)
    xyz2_grouped = group_point(xyz2, idx) # batch_size, npoint, nsample, 3
    xyz1_expanded = tf.expand_dims(xyz1, 2) # batch_size, npoint, 1, 3
    xyz_diff = xyz2_grouped - xyz1_expanded # batch_size, npoint, nsample, 3

    feat2_grouped = group_point(feat2, idx) # batch_size, npoint, nsample, channel
    feat1_expanded = tf.expand_dims(feat1, 2) # batch_size, npoint, 1, channel
    feat_diff = feat2_grouped - feat1_expanded

    feat_diff = tf.concat(axis=-1, values=[feat_diff, feat2_grouped, tf.tile(feat1_expanded,[1,1,nsample,1])]) # batch_size, npoint, nsample, channel*3


    feat1_new = tf.concat([feat_diff, xyz_diff], axis=3) # batch_size, npoint, nsample, [channel or 1] + 3
    # TODO: move scope to outer indent
    o=[]
    with tf.variable_scope(scope) as sc:
        for i, num_out_channel in enumerate(mlp):
            activation_fn = tf.nn.relu if i < len(mlp)-1 else None
            feat1_new = tf_util.conv2d(feat1_new, num_out_channel, [1,1],
                                       padding='VALID', stride=[1,1],
                                       bn=True, is_training=is_training, #activation_fn=activation_fn,
                                       scope='conv_diff_%d'%(i), bn_decay=bn_decay)
            o.append(feat1_new)
    feat1_new += 0.00001

    feat1_new = tf.squeeze(feat1_new, [3]) # batch_size, npoint1, nsample

    square = feat1_new #tf.square(feat1_new)
    sm = square / tf.expand_dims(tf.reduce_sum(square, axis=-1), axis=-1)
    # sm = tf.nn.softmax(feat1_new) # batch_size, npoint1, nsample
    sm = tf.expand_dims(sm, axis=-1)

    flow_new = xyz_diff * sm # batch_size, npoint, nsample, 3
    flow_new = tf.reduce_sum(flow_new, axis=-2) # batch_size, npoint, 3

    # feat_new = feat2_grouped * sm # batch_size, npoint, nsample, channel
    # feat_new = tf.reduce_sum(feat_new, axis=-2) # batch_size, npoint, channel

    return xyz1, flow_new, sm, idx,o, feat_diff, xyz_diff
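A hedged call sketch for the softmax-style embedding above. The MLP must end in a single channel so the squeeze on the last axis is valid; the group size, scope name, and surrounding tensors are illustrative assumptions.

xyz1, flow, weights, idx, _, _, _ = softmax_embedding(
    xyz1, xyz2, feat1, feat2,
    radius=None, nsample=32, mlp=[64, 64, 1],
    is_training=is_training, bn_decay=bn_decay,
    scope='softmax_embed', knn=True)
# flow: (B, N, 3) weighted displacement toward the matched neighbours in xyz2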
Example #22
    def sample_and_group(self, xyz, points):
        '''
        Input:
            npoint: int32
            radius: float32
            nsample: int32
            xyz: (batch_size, ndataset, 3) TF tensor
            points: (batch_size, ndataset, channel) TF tensor, if None will just use xyz as points
            knn: bool, if True use kNN instead of radius search
            use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
        Output:
            new_xyz: (batch_size, npoint, 3) TF tensor
            new_points: (batch_size, npoint, nsample, 3+channel) TF tensor
            idx: (batch_size, npoint, nsample) TF tensor, indices of local points as in ndataset points
            grouped_xyz: (batch_size, npoint, nsample, 3) TF tensor, normalized point XYZs
                (subtracted by seed point XYZ) in local regions
        '''
        # the npoint farthest-sampled points per batch (e.g. 1024)
        # new_xyz: b * npoints * 3
        new_xyz = gather_point(xyz, farthest_point_sample(
            self.npoint, xyz))  # (batch_size, npoint, 3)
        if self.knn:
            _, idx = knn_point(self.nsample, xyz, new_xyz)
        else:
            # idx: (batch_size, npoint, nsample) int32 array, indices to input points
            # pts_cnt: (batch_size, npoint) int32 array, number of unique points in each local region
            idx, pts_cnt = query_ball_point(self.radius, self.nsample, xyz,
                                            new_xyz)
        # grouped_xyz: (batch_size, npoint, nsample, channel)
        # according to idx return corresponding chanel
        grouped_xyz = group_point(xyz, idx)  # (batch_size, npoint, nsample, 3)
        # move the points to the center (by subtracting the center coordinate)
        grouped_xyz -= tf.tile(tf.expand_dims(
            new_xyz, 2), [1, 1, self.nsample, 1])  # translation normalization
        if points is not None:
            grouped_points = group_point(
                points, idx)  # (batch_size, npoint, nsample, channel)
            if self.use_xyz:
                new_points = tf.concat(
                    [grouped_xyz, grouped_points],
                    axis=-1)  # (batch_size, npoint, nsample, 3+channel)
            else:
                new_points = grouped_points
        else:
            new_points = grouped_xyz

        return new_xyz, new_points, idx, grouped_xyz
Example #23
def sample_and_group_polygons(npoint, nsample, xyz, points):
    '''
    Input:
        npoint: int32
        radius: float32
        nsample: int32
        xyz: (batch_size, ndataset, 3) TF tensor
        points: (batch_size, ndataset, channel) TF tensor, if None will just use xyz as points
        knn: bool, if True use kNN instead of radius search
        use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
    Output:
        new_xyz: (batch_size, npoint, 3) TF tensor
        new_points: (batch_size, npoint, nsample, 3+channel) TF tensor
        idx: (batch_size, npoint, nsample) TF tensor, indices of local points as in ndataset points
        grouped_xyz: (batch_size, npoint, nsample, 3) TF tensor, normalized point XYZs
            (subtracted by seed point XYZ) in local regions
    '''

    # find seed points
    new_xyz_idx = farthest_point_sample(npoint, xyz)  # (batch_size, npoint)
    new_xyz = gather_point(xyz, new_xyz_idx)  # (batch_size, npoint, 3)

    # find nearest neighbors and group points
    _, idx = knn_point(nsample, xyz, new_xyz)
    grouped_xyz = group_point(xyz, idx)  # (batch_size, npoint, nsample, 3)
    grouped_xyz = tf.concat((grouped_xyz, tf.expand_dims(new_xyz, axis=2)),
                            axis=2)  # (batch_size, npoint, nsample+1, 3)

    # concatenate the indices of the neighbors to the seed indices
    idx = tf.concat((idx, tf.expand_dims(new_xyz_idx, axis=-1)), axis=-1)

    if points is not None:
        grouped_points = group_point(
            points, idx)  # (batch_size, npoint, nsample+1, channel)
        new_points = tf.concat(
            [grouped_xyz, grouped_points],
            axis=-1)  # (batch_size, npoint, nsample+1, 3+channel)
    else:
        new_points = grouped_xyz

    return new_xyz, new_points, grouped_xyz
Example #24
def sample_and_group(npoint, radius, nsample, xyz, points, global_idx, knn=False, use_xyz=True):
    fps_idx, g_idx = farthest_point_sample(npoint, xyz, global_idx)
    new_xyz = gather_point(xyz, fps_idx) # (batch_size, npoint, 3)
    if knn:
        _,idx = knn_point(nsample, xyz, new_xyz)
    else:
        idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)
    grouped_xyz = group_point(xyz, idx) # (batch_size, npoint, nsample, 3)
    center_xyz = tf.tile(tf.expand_dims(new_xyz, 2), [1, 1, nsample, 1])
    diff_xyz = grouped_xyz - center_xyz # translation normalization
    euclid_dist = tf.norm(diff_xyz, axis=-1, keep_dims=True)
    grouped_xyz = tf.concat([grouped_xyz, center_xyz, diff_xyz, euclid_dist], axis=-1)
    if points is not None:
        grouped_points = group_point(points, idx) # (batch_size, npoint, nsample, channel)
        if use_xyz:
            new_points = tf.concat([diff_xyz, grouped_points], axis=-1) # (batch_size, npoint, nsample, 3+channel)
        else:
            new_points = grouped_points
    else:
        new_points = diff_xyz
    return new_xyz, new_points, fps_idx, g_idx, diff_xyz
Example #25
def sample_edge(npoint, radius, nsample, xyz, points, knn=False, use_xyz=True):
    '''
    Input:
        npoint: int32
        radius: float32
        nsample: int32
        xyz: (batch_size, ndataset, 3) TF tensor
        points: (batch_size, ndataset, channel) TF tensor, if None will just use xyz as points

    Output:
        new_xyz: (batch_size, npoint, 3) TF tensor
        new_points: (batch_size, npoint, nsample, 3+channel) TF tensor
        idx: (batch_size, npoint, nsample) TF tensor, indices of local points as in ndataset points
        grouped_xyz: (batch_size, npoint, nsample, 3) TF tensor, normalized point XYZs
            (subtracted by seed point XYZ) in local regions
    '''
    # new_xyz = gather_edge(xyz, farthest_point_sample(npoint, xyz))  # result was unused; gather_edge is not defined in this snippet
    new_xyz = gather_point(xyz, farthest_point_sample(npoint, xyz))
    if knn:
        #print('using knn')
        _, idx = knn_point(nsample, xyz, new_xyz)
    else:
        idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)
    grouped_xyz = group_point(xyz, idx)  # (batch_size, npoint, nsample, 3)
    grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2),
                           [1, 1, nsample, 1])  # translation normalization
    #print('grouped points',grouped_xyz.shape)
    if points is not None:
        grouped_points = group_point(
            points, idx)  # (batch_size, npoint, nsample, channel)
        if use_xyz:
            new_points = tf.concat(
                [grouped_xyz, grouped_points],
                axis=-1)  # (batch_size, npoint, nsample, 3+channel)
        else:
            new_points = grouped_points
    else:
        new_points = grouped_xyz
    #print('new points',new_points.shape)
    return new_xyz, new_points, idx, grouped_xyz
Example #26
def sample_and_group(npoint, radius, nsample, xyz, points, tnet_spec=None, knn=False, use_xyz=True):
    '''
    Input:
        npoint: int32
        radius: float32
        nsample: int32
        xyz: (batch_size, ndataset, 3) TF tensor
        points: (batch_size, ndataset, channel) TF tensor, if None will just use xyz as points
        tnet_spec: dict (keys: mlp, mlp2, is_training, bn_decay), if None do not apply tnet
        knn: bool, if True use kNN instead of radius search
        use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
    Output:
        new_xyz: (batch_size, npoint, 3) TF tensor
        new_points: (batch_size, npoint, nsample, 3+channel) TF tensor
        idx: (batch_size, npoint, nsample) TF tensor, indices of local points as in ndataset points
        grouped_xyz: (batch_size, npoint, nsample, 3) TF tensor, normalized point XYZs
            (subtracted by seed point XYZ) in local regions
    '''
    # sampling layer: choose npoint centroids, then sample nsample neighbours around each
    new_xyz = gather_point(xyz, farthest_point_sample(npoint, xyz)) # (batch_size, npoint, 3)
    # grouping layer
    if knn:
        _,idx = knn_point(nsample, xyz, new_xyz)
    else:  # idx holds, for each sampled centroid, the indices of its neighbours within the original point set
        idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)
    grouped_xyz = group_point(xyz, idx) # (batch_size, npoint, nsample, 3): coordinates gathered by the neighbour indices
    # relative offset of each neighbour from its centroid
    grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2), [1,1,nsample,1]) # translation normalization: tile the centroid to (.., nsample, ..) and subtract it
    if tnet_spec is not None:
        grouped_xyz = tnet(grouped_xyz, tnet_spec)
    if points is not None:
        grouped_points = group_point(points, idx) # (batch_size, npoint, nsample, channel); points carries extra features, xyz the coordinates
        if use_xyz:
            new_points = tf.concat([grouped_xyz, grouped_points], axis=-1) # (batch_size, npoint, nsample, 3+channel)
        else:
            new_points = grouped_points
    else:
        new_points = grouped_xyz

    return new_xyz, new_points, idx, grouped_xyz  # centroids, grouped points (possibly with channel features), idx, grouped xyz
Example #27
def sample_and_group_all_polygons(nsample, xyz, points):
    '''
    Input:
        npoint: int32
        radius: float32
        nsample: int32
        xyz: (batch_size, ndataset, 3) TF tensor
        points: (batch_size, ndataset, channel) TF tensor, if None will just use xyz as points
        knn: bool, if True use kNN instead of radius search
        use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
    Output:
        new_xyz: (batch_size, npoint, 3) TF tensor
        new_points: (batch_size, npoint, nsample, 3+channel) TF tensor
        idx: (batch_size, npoint, nsample) TF tensor, indices of local points as in ndataset points
        grouped_xyz: (batch_size, npoint, nsample, 3) TF tensor, normalized point XYZs
            (subtracted by seed point XYZ) in local regions
    '''

    ndataset = xyz.get_shape()[1].value

    # find nearest neighbors and group points
    _, idx = knn_point(nsample, xyz, xyz)
    grouped_xyz = group_point(xyz, idx)  # (batch_size, ndataset, nsample, 3)
    grouped_xyz = tf.concat((tf.expand_dims(xyz, axis=2), grouped_xyz),
                            axis=2)  # (batch_size, ndataset, nsample+1, 3)

    if points is not None:
        grouped_points = group_point(
            points, idx)  # (batch_size, ndataset, nsample, channel)
        grouped_points = tf.concat(
            (tf.expand_dims(points, axis=2), grouped_points),
            axis=2)  # (batch_size, ndataset, nsample+1, channel)
        new_points = tf.concat(
            [grouped_xyz, grouped_points],
            axis=-1)  # (batch_size, ndataset, nsample+1, 3+channel)
    else:
        new_points = grouped_xyz

    return xyz, new_points, grouped_xyz
Example #28
File: tf_util2.py  Project: liruihui/Dis-PU
def pointnet_sa_module_msg3(gt,
                            pred,
                            npoint,
                            radius_list,
                            nsample_list,
                            mlp_list,
                            scope,
                            use_xyz=True,
                            use_nchw=False,
                            knn=True):
    ''' PointNet Set Abstraction (SA) module with Multi-Scale Grouping (MSG)
        Input:
            xyz: (batch_size, ndataset, 3) TF tensor
            points: (batch_size, ndataset, channel) TF tensor
            npoint: int32 -- #points sampled in farthest point sampling
            radius: list of float32 -- search radius in local region
            nsample: list of int32 -- how many points in each local region
            mlp: list of list of int32 -- output size for MLP on each point
            use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
            use_nchw: bool, if True, use NCHW data format for conv2d, which is usually faster than NHWC format
        Return:
            new_xyz: (batch_size, npoint, 3) TF tensor
            new_points: (batch_size, npoint, \sum_k{mlp[k][-1]}) TF tensor
    '''
    with tf.variable_scope(scope) as sc:
        p1_idx = farthest_point_sample(npoint, gt)
        source_gt = gather_point(gt, p1_idx)
        new_points_list = []
        for i in range(len(radius_list)):
            radius = radius_list[i]
            nsample = nsample_list[i]
            if knn:
                _, idx_gt = knn_point(nsample, gt, source_gt)
            else:
                idx_gt, _ = query_ball_point(radius, nsample, gt, source_gt)
            grouped_gt = group_point(gt, idx_gt)  #b*n*k*3
            grouped_gt -= tf.tile(tf.expand_dims(source_gt, 2),
                                  [1, 1, nsample, 1])

            if knn:
                _, idx_pred = knn_point(nsample, pred, source_gt)
            else:
                idx_pred, _ = query_ball_point(radius, nsample, pred,
                                               source_gt)
            grouped_pred = group_point(pred, idx_pred)  # b*n*k*3
            grouped_pred -= tf.tile(tf.expand_dims(source_gt, 2),
                                    [1, 1, nsample, 1])

            grouped_points = tf.concat([grouped_gt, grouped_pred], axis=2)

            for j, num_out_channel in enumerate(mlp_list[i]):
                grouped_points = conv2d(grouped_points,
                                        num_out_channel, [1, 1],
                                        weight_decay=0,
                                        padding='VALID',
                                        stride=[1, 1],
                                        scope='conv%d_%d' % (i, j),
                                        activation_fn=tf.nn.leaky_relu)

            # pool the gt half and the pred half of the neighborhood separately
            # (a window and stride of nsample along axis 2 acts like a per-half reduce_max)
            #new_points = tf.reduce_max(grouped_points, axis=[2]) #b*n*c
            new_points = tf.layers.max_pooling2d(grouped_points, [1, nsample],
                                                 strides=[1, nsample],
                                                 padding='VALID',
                                                 name='maxpool_%d' % i)

            new_points_list.append(new_points)
        new_points_concat = tf.concat(new_points_list, axis=-1)
        return source_gt, new_points_concat
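For orientation, a minimal sketch of how this module might be called (the shapes, radii, and MLP widths below are illustrative placeholders, not values from the project):

import tensorflow as tf

gt = tf.placeholder(tf.float32, [8, 1024, 3])    # ground-truth point cloud
pred = tf.placeholder(tf.float32, [8, 1024, 3])  # predicted point cloud

seeds, feats = pointnet_sa_module_msg3(
    gt, pred,
    npoint=256,
    radius_list=[0.1, 0.2, 0.4],
    nsample_list=[16, 32, 64],
    mlp_list=[[32, 32, 64], [64, 64, 128], [64, 96, 128]],
    scope='sa_msg3',
    knn=True)
# seeds: (8, 256, 3) farthest-point-sampled seeds from gt
# feats: per-seed multi-scale features, concatenated over the three scales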
Example #29
0
def sample_and_group(npoint,
                     radius,
                     nsample,
                     xyz,
                     points,
                     knn=False,
                     use_xyz=True):
    '''
    Input:
        npoint: int32
        radius: float32
        nsample: int32
        xyz: (batch_size, ndataset, 3) TF tensor
        points: (batch_size, ndataset, channel) TF tensor, if None will just use xyz as points
        knn: bool, if True use kNN instead of radius search
        use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
    Output:
        new_xyz: (batch_size, npoint, 3) TF tensor
        new_points: (batch_size, npoint, nsample, 3+channel) TF tensor
        idx: (batch_size, npoint, nsample) TF tensor, indices of local points as in ndataset points
        grouped_xyz: (batch_size, npoint, nsample, 3) TF tensor, normalized point XYZs
            (subtracted by seed point XYZ) in local regions
        G_p: (batch_size, npoint, nsample, 12) TF tensor, per-neighbor geometric features from get_neigh_geo_feat
    '''

    new_xyz = gather_point(xyz, farthest_point_sample(
        npoint, xyz))  # (batch_size, npoint, 3)  B*N*3
    if knn:
        _, idx = knn_point(nsample, xyz, new_xyz)
    else:
        idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)
        # idx (batch_size, npoint, nsample)  pts_cnt (batch_size, npoint)
    grouped_xyz = group_point(xyz, idx)  # (batch_size, npoint, nsample, 3)

    G_p = get_neigh_geo_feat(new_xyz, grouped_xyz, nsample)  # B * N * 32 *12
    # batch= xyz.get_shape()[0].value
    # one=tf.ones([batch,npoint],dtype=tf.float32)
    # V=tf.constant(4/3*math.pi*radius*radius*radius)
    #
    # D=tf.divide(tf.cast(pts_cnt,tf.float32),tf.cast(V,tf.float32)) # (batch_size, npoint) maybe need to be normalized
    # D=tf.expand_dims(D, -1)
    # D = tf.tile(tf.expand_dims(D, -1), [1, 1, nsample, 1])  # 16,1024,32,1
    #
    # G_p=tf.concat([G_p,D], axis=-1) # 16,10244,32,13

    # now, grouped_xyz is local coordinate
    grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2),
                           [1, 1, nsample, 1])  # translation normalization

    if points is not None:
        grouped_points = group_point(
            points, idx)  # (batch_size, npoint, nsample, channel)
        if use_xyz:
            new_points = tf.concat(
                [grouped_xyz, grouped_points],
                axis=-1)  # (batch_size, npoint, nsample, 3+channel)
        else:
            new_points = grouped_points
    else:
        new_points = grouped_xyz

    return new_xyz, new_points, idx, grouped_xyz, G_p
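A minimal usage sketch for this variant (shapes are hypothetical; the custom sampling/grouping ops and get_neigh_geo_feat are assumed to be importable):

import tensorflow as tf

xyz = tf.placeholder(tf.float32, [16, 1024, 3])      # input coordinates
points = tf.placeholder(tf.float32, [16, 1024, 64])  # per-point features

new_xyz, new_points, idx, grouped_xyz, G_p = sample_and_group(
    npoint=512, radius=0.2, nsample=32,
    xyz=xyz, points=points, knn=False, use_xyz=True)
# new_xyz:     (16, 512, 3)       sampled seed points
# new_points:  (16, 512, 32, 67)  local XYZ concatenated with neighbor features
# idx:         (16, 512, 32)      neighbor indices into the original cloud
# grouped_xyz: (16, 512, 32, 3)   neighbor XYZs relative to each seed
# G_p:         (16, 512, 32, 12)  per-neighbor geometric features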
Example #30
0
def get_model(xyz, is_training, bn_decay=None, num_class=NUM_CLASSES):
    batch_size = xyz.get_shape()[0].value
    num_point = xyz.get_shape()[1].value

    nsample = 20
    G = 16
    taylor_channel = 5

    with tf.variable_scope('delta') as sc:
        _, idx = knn_point(nsample, xyz, xyz)

        grouped_xyz = group_point(xyz, idx)
        point_cloud_tile = tf.expand_dims(xyz, [2])
        point_cloud_tile = tf.tile(point_cloud_tile, [1, 1, nsample, 1])
        delta = grouped_xyz - point_cloud_tile  # relative XYZ of each of the nsample neighbors

    with tf.variable_scope('fanConv1') as sc:
        feat_1 = tf_util.spiderConv(xyz,
                                    idx,
                                    delta,
                                    32,
                                    taylor_channel=taylor_channel,
                                    gn=True,
                                    G=G,
                                    is_multi_GPU=True)

    with tf.variable_scope('fanConv2') as sc:
        feat_2 = tf_util.spiderConv(feat_1,
                                    idx,
                                    delta,
                                    64,
                                    taylor_channel=taylor_channel,
                                    gn=True,
                                    G=G,
                                    is_multi_GPU=True)

    with tf.variable_scope('fanConv3') as sc:
        feat_3 = tf_util.spiderConv(feat_2,
                                    idx,
                                    delta,
                                    128,
                                    taylor_channel=taylor_channel,
                                    gn=True,
                                    G=G,
                                    is_multi_GPU=True)

    with tf.variable_scope('fanConv4') as sc:
        feat_4 = tf_util.spiderConv(feat_3,
                                    idx,
                                    delta,
                                    256,
                                    taylor_channel=taylor_channel,
                                    gn=True,
                                    G=G,
                                    is_multi_GPU=True)

    feat = tf.concat([feat_1, feat_2, feat_3, feat_4], 2)

    #top-k pooling
    net = tf_util.topk_pool(feat, k=2, scope='topk_pool')
    net = tf.reshape(net, [batch_size, -1])
    net = tf_util.fully_connected(net,
                                  1024,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc1',
                                  bn_decay=bn_decay,
                                  is_multi_GPU=True)
    net = tf_util.dropout(net,
                          keep_prob=0.3,
                          is_training=is_training,
                          scope='dp1')
    net = tf_util.fully_connected(net,
                                  512,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc2',
                                  bn_decay=bn_decay,
                                  is_multi_GPU=True)
    net = tf_util.dropout(net,
                          keep_prob=0.3,
                          is_training=is_training,
                          scope='dp2')
    net = tf_util.fully_connected(net,
                                  num_class,
                                  activation_fn=None,
                                  scope='fc3')

    return net
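Building the classification graph from this model is straightforward; a short sketch (batch size, point count, and class count below are placeholders chosen for illustration):

import tensorflow as tf

xyz = tf.placeholder(tf.float32, [32, 1024, 3])
is_training = tf.placeholder(tf.bool, [])

logits = get_model(xyz, is_training, bn_decay=None, num_class=40)
# logits: (32, 40) unnormalized class scores from the final fc3 layer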
Example #31
0
def get_model(xyz_withnor, is_training, bn_decay=None, num_classes=40):
    batch_size = xyz_withnor.get_shape()[0].value
    num_point = xyz_withnor.get_shape()[1].value

    K_knn = 20
    taylor_channel = 3

    xyz = xyz_withnor[:, :, 0:3]

    with tf.variable_scope('delta') as sc:
        _, idx = knn_point(K_knn, xyz, xyz)

        grouped_xyz = group_point(xyz, idx)
        point_cloud_tile = tf.expand_dims(xyz, [2])
        point_cloud_tile = tf.tile(point_cloud_tile, [1, 1, K_knn, 1])
        delta = grouped_xyz - point_cloud_tile

    with tf.variable_scope('SpiderConv1') as sc:
        feat_1 = tf_util.spiderConv(xyz_withnor,
                                    idx,
                                    delta,
                                    32,
                                    taylor_channel=taylor_channel,
                                    bn=True,
                                    is_training=is_training,
                                    bn_decay=bn_decay)

    with tf.variable_scope('SpiderConv2') as sc:
        feat_2 = tf_util.spiderConv(feat_1,
                                    idx,
                                    delta,
                                    64,
                                    taylor_channel=taylor_channel,
                                    bn=True,
                                    is_training=is_training,
                                    bn_decay=bn_decay)

    with tf.variable_scope('SpiderConv3') as sc:
        feat_3 = tf_util.spiderConv(feat_2,
                                    idx,
                                    delta,
                                    128,
                                    taylor_channel=taylor_channel,
                                    bn=True,
                                    is_training=is_training,
                                    bn_decay=bn_decay)

    with tf.variable_scope('SpiderConv4') as sc:
        feat_4 = tf_util.spiderConv(feat_3,
                                    idx,
                                    delta,
                                    256,
                                    taylor_channel=taylor_channel,
                                    bn=True,
                                    is_training=is_training,
                                    bn_decay=bn_decay)

    feat = tf.concat([feat_1, feat_2, feat_3, feat_4], 2)

    #top-k pooling
    net = tf_util.topk_pool(feat, k=2, scope='topk_pool')

    net = tf.reshape(net, [batch_size, -1])
    net = tf_util.fully_connected(net,
                                  512,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc1',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.5,
                          is_training=is_training,
                          scope='dp1')
    net = tf_util.fully_connected(net,
                                  256,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc2',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.5,
                          is_training=is_training,
                          scope='dp2')
    net = tf_util.fully_connected(net, num_classes, activation_fn=None, scope='fc3')

    return net
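And a minimal training hookup for the logits returned above (standard TF 1.x cross-entropy; the 6-channel xyz-plus-normals input and the label placeholder are assumptions for illustration):

import tensorflow as tf

xyz_withnor = tf.placeholder(tf.float32, [32, 1024, 6])  # xyz + normals
labels = tf.placeholder(tf.int32, [32])
is_training = tf.placeholder(tf.bool, [])

logits = get_model(xyz_withnor, is_training, num_classes=40)
loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels,
                                                   logits=logits))
train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)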