Example 1
def sample_and_group(npoint, radius, nsample, xyz, points, tnet_spec=None, knn=False, use_xyz=True):
    '''
    Input:
        npoint: int32
        radius: float32
        nsample: int32
        xyz: (batch_size, ndataset, 3) TF tensor
        points: (batch_size, ndataset, channel) TF tensor, if None will just use xyz as points
        tnet_spec: dict (keys: mlp, mlp2, is_training, bn_decay), if None do not apply tnet
        knn: bool, if True use kNN instead of radius search
        use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
    Output:
        new_xyz: (batch_size, npoint, 3) TF tensor
        new_points: (batch_size, npoint, nsample, 3+channel) TF tensor
        idx: (batch_size, npoint, nsample) TF tensor, indices of local points as in ndataset points
        grouped_xyz: (batch_size, npoint, nsample, 3) TF tensor, normalized point XYZs
            (subtracted by seed point XYZ) in local regions
    '''

    new_xyz = gather_point(xyz, farthest_point_sample(npoint, xyz)) # (batch_size, npoint, 3)
    if knn:
        _,idx = knn_point(nsample, xyz, new_xyz)
    else:
        if np.isscalar(radius):
            idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)
            tf.summary.histogram('pts_cnt', pts_cnt)
        else:
            idx_list = []
            for radius_one, xyz_one, new_xyz_one in zip(tf.unstack(radius,axis=0), tf.unstack(xyz, axis=0),tf.unstack(new_xyz, axis=0)):
                idx_one, pts_cnt = query_ball_point(radius_one, nsample, tf.expand_dims(xyz_one, axis=0), tf.expand_dims(new_xyz_one, axis=0))
                idx_list.append(idx_one)
            idx = tf.stack(idx_list, axis=0)
            idx = tf.squeeze(idx, axis=1)

    grouped_xyz = group_point(xyz, idx) # (batch_size, npoint, nsample, 3)
    grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2), [1,1,nsample,1]) # translation normalization
    if tnet_spec is not None:
        grouped_xyz = tnet(grouped_xyz, tnet_spec)
    if points is not None:
        grouped_points = group_point(points, idx) # (batch_size, npoint, nsample, channel)
        if use_xyz:
            # new_points = tf.concat([grouped_xyz, tf.tile(tf.expand_dims(new_xyz, 2), [1,1,nsample,1]),grouped_points], axis=-1) # (batch_size, npoint, nsample, 3+channel)
            new_points = tf.concat([grouped_xyz, grouped_points],axis=-1)  # (batch_size, npoint, nsample, 3+channel)
        else:
            new_points = grouped_points
    else:
        # new_points =  tf.concat([grouped_xyz, tf.tile(tf.expand_dims(new_xyz, 2), [1,1,nsample,1])], axis=-1)
        new_points = grouped_xyz

    return new_xyz, new_points, idx, grouped_xyz
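
A minimal call sketch for the function above, assuming the compiled PointNet++ grouping ops (farthest_point_sample, gather_point, query_ball_point, group_point) are importable; shapes and hyper-parameters are illustrative only:

import tensorflow as tf

xyz = tf.placeholder(tf.float32, shape=(8, 1024, 3))       # raw point coordinates
points = tf.placeholder(tf.float32, shape=(8, 1024, 64))   # per-point features
new_xyz, new_points, idx, grouped_xyz = sample_and_group(
    npoint=256, radius=0.2, nsample=32,
    xyz=xyz, points=points, knn=False, use_xyz=True)
# new_xyz: (8, 256, 3), new_points: (8, 256, 32, 3 + 64), idx: (8, 256, 32)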
Example 2
def set_abstraction_msg(xyz, points, npoint, radius_list, nsample_list,
                        mlp_list, is_training, use_nchw):
    new_xyz = gather_point(xyz, farthest_point_sample(npoint, xyz))
    new_points_list = []
    for i in range(len(radius_list)):
        radius = radius_list[i]
        nsample = nsample_list[i]
        group_idx = query_ball_point(radius, nsample, xyz, new_xyz)
        grouped_xyz = group_point(xyz, group_idx[0])
        grouped_xyz -= K.tile(
            Lambda(lambda x: K.expand_dims(x, axis=2))(new_xyz),
            [1, 1, nsample, 1])
        if points is not None:
            grouped_points = group_point(points, group_idx[0])
            grouped_points = Lambda(lambda x: K.concatenate(x, axis=-1))(
                [grouped_points, grouped_xyz])
        else:
            grouped_points = grouped_xyz
        if use_nchw:
            grouped_points = Lambda(lambda x: K.permute_dimensions(
                x, [0, 3, 1, 2]))(grouped_points)
        for j, num_out_channel in enumerate(mlp_list[i]):
            grouped_points = Conv2D(num_out_channel, 1,
                                    activation="relu")(grouped_points)
            grouped_points = BatchNormalization()(grouped_points,
                                                  training=is_training)
        if use_nchw:
            grouped_points = Lambda(lambda x: K.permute_dimensions(
                x, [0, 2, 3, 1]))(grouped_points)
        new_points = Lambda(lambda x: K.max(x, axis=2))(grouped_points)
        new_points_list.append(new_points)
    new_points_concat = Lambda(lambda x: K.concatenate(x, axis=-1))(
        new_points_list)
    return new_xyz, new_points_concat
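
The use_nchw branch above permutes the grouped features from NHWC to NCHW before the 1x1 convolutions and back again afterwards. A NumPy illustration of those two permutations (shapes are assumed, for illustration only):

import numpy as np

x = np.zeros((8, 512, 32, 64))                # (batch, npoint, nsample, channel), i.e. NHWC
x_nchw = np.transpose(x, (0, 3, 1, 2))        # -> (batch, channel, npoint, nsample) for the convs
x_back = np.transpose(x_nchw, (0, 2, 3, 1))   # restore NHWC before the max over the nsample axis
print(x_back.shape)                           # (8, 512, 32, 64)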
Example 3
def get_perulsion_loss(pred, nsample=15, radius=0.07, knn=False, numpoint=4096, use_l1=False):
    # pred: (batch_size, npoint,3)
    if knn:
        with tf.device('/gpu:1'):
            _, idx = knn_point(nsample, pred, pred)
        pts_cnt = tf.constant(nsample, shape=(30, 1024))
    else:
        idx, pts_cnt = query_ball_point(radius, nsample, pred, pred)
    tf.summary.histogram('smooth/unque_index', pts_cnt)

    grouped_pred = group_point(pred, idx)  # (batch_size, npoint, nsample, 3)
    grouped_pred -= tf.expand_dims(pred, 2)

    ##get the uniform loss
    dists = tf.reduce_sum(grouped_pred ** 2, axis=-1)
    if use_l1:
        dists = tf.sqrt(dists+1e-12)
    val, idx = tf.nn.top_k(-dists, 5)
    val = val[:, :, 1:]  # remove the first one

    if use_l1:
        h = np.sqrt(0.001)*2
    else:
        h = 0.001
    print "h is ",h
    val = tf.maximum(0.0, h + val) # dd/np.sqrt(n)
    perulsion_loss = tf.reduce_mean(val)
    return perulsion_loss
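
Since val holds the negated squared distances from top_k, the clamp tf.maximum(0.0, h + val) equals max(0, h - d^2): only neighbours whose squared distance is below h are penalised. A small NumPy sketch of the same per-point term (illustrative values only):

import numpy as np

dists_sq = np.array([0.0, 0.0004, 0.0009, 0.0025, 0.01])  # squared distances, nearest first (self-match at 0)
h = 0.001
penalty = np.maximum(0.0, h - dists_sq[1:])                # drop the self-match, as val[:, :, 1:] does
print(penalty.mean())                                      # only the two closest real neighbours contribute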
Example 4
def get_smooth_and_uniform_loss(pred,
                                normal,
                                nsample=20,
                                radius=0.07,
                                knn=False):
    # pred: (batch_size, npoint,3)
    if knn:
        with tf.device('/gpu:1'):
            _, idx = knn_point(nsample, pred, pred)
        pts_cnt = tf.constant(nsample, shape=(30, 1024))
    else:
        idx, pts_cnt = query_ball_point(radius, nsample, pred, pred)
    tf.summary.histogram('smooth/unque_index', pts_cnt)

    grouped_pred = group_point(pred, idx)  # (batch_size, npoint, nsample, 3)
    grouped_pred -= tf.expand_dims(pred, 2)

    ##get the uniform loss
    dists = tf.reduce_sum(grouped_pred**2, axis=-1)
    val, idx = tf.nn.top_k(-dists, 5)
    val = val[:, :, 1:]  # remove the first one
    val = tf.maximum(0.0, 0.001 + val)
    uniform_loss = tf.reduce_mean(val)

    # idx = idx[:, :, 1:]  # (batch_size, npoint, 4)
    # batch_size = pred.get_shape()[0].value
    # nPoints = pred.get_shape()[1].value
    # grouped_pred_reshape = tf.reshape(grouped_pred, (-1, 3))
    # indics = tf.reshape(tf.range(batch_size*nPoints), (batch_size*nPoints, 1)) * nsample + tf.reshape(idx,[batch_size*nPoints,-1])
    # grouped_pred = tf.gather(grouped_pred_reshape, indics)
    # grouped_pred = tf.reshape(grouped_pred,(batch_size,nPoints,4,-1))
    # grouped_pred = tf.nn.l2_normalize(grouped_pred, dim=-1)
    # inner_product = tf.abs(tf.reduce_sum(grouped_pred * tf.expand_dims(normal, axis=2), axis=-1))  # (batch_size, npoint,nsample)
    # smooth_loss = tf.reduce_mean(inner_product)
    return uniform_loss, 0
Example 5
def get_uniform_loss(pcd,
                     percentages=[0.004, 0.006, 0.008, 0.010, 0.012],
                     radius=1.0):
    B, N, C = pcd.get_shape().as_list()
    npoint = int(N * 0.05)
    loss = []
    for p in percentages:
        nsample = int(N * p)
        r = math.sqrt(p * radius)
        disk_area = math.pi * (radius**2) * p / nsample
        #print(npoint,nsample)
        new_xyz = gather_point(pcd, farthest_point_sample(
            npoint, pcd))  # (batch_size, npoint, 3)
        idx, pts_cnt = query_ball_point(
            r, nsample, pcd, new_xyz)  #(batch_size, npoint, nsample)

        #expect_len =  tf.sqrt(2*disk_area/1.732)#using hexagon
        expect_len = tf.sqrt(disk_area)  # using square

        grouped_pcd = group_point(pcd, idx)
        grouped_pcd = tf.concat(tf.unstack(grouped_pcd, axis=1), axis=0)

        var, _ = knn_point(2, grouped_pcd, grouped_pcd)
        uniform_dis = -var[:, :, 1:]
        uniform_dis = tf.sqrt(tf.abs(uniform_dis + 1e-8))
        uniform_dis = tf.reduce_mean(uniform_dis, axis=[-1])
        uniform_dis = tf.square(uniform_dis - expect_len) / (expect_len + 1e-8)
        uniform_dis = tf.reshape(uniform_dis, [-1])

        mean, variance = tf.nn.moments(uniform_dis, axes=0)
        mean = mean * math.pow(p * 100, 2)
        #nothing 4
        loss.append(mean)
    return tf.add_n(loss) / len(percentages)
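
The loop above estimates, for each percentage p, the spacing a perfectly uniform cloud would show inside a query ball: disk_area is the area per sample in that ball, and expect_len = sqrt(disk_area) is the corresponding nearest-neighbour distance under square packing (the commented-out line is the hexagonal alternative). A quick numeric check of those two estimates (assumed values, for illustration only):

import math

N, p, radius = 4096, 0.004, 1.0
nsample = int(N * p)                              # points expected inside one query ball
disk_area = math.pi * (radius ** 2) * p / nsample
print(math.sqrt(disk_area))                       # square-packing spacing used above
print(math.sqrt(2 * disk_area / 1.732))           # hexagonal-packing spacing from the comment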
Example 6
def get_repulsion_loss(pred,
                       nsample=20,
                       radius=0.07,
                       knn=False,
                       use_l1=False,
                       h=0.001):

    if knn:
        _, idx = knn_point_2(nsample, pred, pred)
        pts_cnt = tf.constant(nsample, shape=(30, 1024))
    else:
        idx, pts_cnt = query_ball_point(radius, nsample, pred, pred)
    tf.summary.histogram('smooth/unque_index', pts_cnt)

    grouped_pred = group_point(pred, idx)  # (batch_size, npoint, nsample, 3)
    grouped_pred -= tf.expand_dims(pred, 2)

    # get the uniform loss
    if use_l1:
        dists = tf.reduce_sum(tf.abs(grouped_pred), axis=-1)
    else:
        dists = tf.reduce_sum(grouped_pred**2, axis=-1)

    val, idx = tf.nn.top_k(-dists, 5)
    val = val[:, :, 1:]  # remove the first one

    if use_l1:
        h = np.sqrt(h) * 2
    print(("h is ", h))

    val = tf.maximum(0.0, h + val)  # dd/np.sqrt(n)
    repulsion_loss = tf.reduce_mean(val)
    return repulsion_loss
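
A minimal training-graph sketch using the loss above (assumes the compiled grouping ops are available; the kNN branch hard-codes pts_cnt for batch size 30 and 1024 points, so the ball-query branch is used here; the optimiser is illustrative only):

import tensorflow as tf

pred = tf.Variable(tf.random_normal((30, 1024, 3)))    # e.g. a predicted / upsampled point cloud
rep_loss = get_repulsion_loss(pred, nsample=20, radius=0.07, use_l1=False, h=0.001)
train_step = tf.train.AdamOptimizer(1e-3).minimize(rep_loss)  # pushes overly close points apart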
Example 7
def sample_and_group(npoint, radius, nsample, xyz, points, knn=False, use_xyz=True):
    '''
    Input:
        npoint: int32
        radius: float32
        nsample: int32
        xyz: (batch_size, ndataset, 3) TF tensor
        points: (batch_size, ndataset, channel) TF tensor, if None will just use xyz as points
        knn: bool, if True use kNN instead of radius search
        use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
    Output:
        new_xyz: (batch_size, npoint, 3) TF tensor
        new_points: (batch_size, npoint, nsample, 3+channel) TF tensor
        idx: (batch_size, npoint, nsample) TF tensor, indices of local points as in ndataset points
        grouped_xyz: (batch_size, npoint, nsample, 3) TF tensor, normalized point XYZs
            (subtracted by seed point XYZ) in local regions
    '''

    new_xyz = gather_point(xyz, farthest_point_sample(npoint, xyz))  # (batch_size, npoint, 3)
    if knn:
        _, idx = knn_point(nsample, xyz, new_xyz)
    else:
        idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)
    grouped_xyz = group_point(xyz, idx)  # (batch_size, npoint, nsample, 3)
    grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2), [1, 1, nsample, 1])  # translation normalization
    if points is not None:
        grouped_points = group_point(points, idx)  # (batch_size, npoint, nsample, channel)
        if use_xyz:
            new_points = tf.concat([grouped_xyz, grouped_points], axis=-1)  # (batch_size, npoint, nsample, 3+channel)
        else:
            new_points = grouped_points
    else:
        new_points = grouped_xyz

    return new_xyz, new_points, idx, grouped_xyz
Example 8
def get_perulsion_loss1_orthdistance(pred, normal, nsample=15, radius=0.07, knn=False, numpoint=4096,use_l1=False):
    # pred: (batch_size, npoint,3)
    if knn:
        with tf.device('/gpu:1'):
            _, idx = knn_point(nsample, pred, pred)
        pts_cnt = tf.constant(nsample, shape=(30, 1024))
    else:
        idx, pts_cnt = query_ball_point(radius, nsample, pred, pred)
    tf.summary.histogram('smooth/unque_index', pts_cnt)

    grouped_pred = group_point(pred, idx)  # (batch_size, npoint, nsample, 3)
    offset = grouped_pred-tf.expand_dims(pred, 2)
    normal = tf.expand_dims(normal,axis=2)
    dists = offset -tf.reduce_sum(normal*offset,axis=-1,keep_dims=True)*normal

    dists = tf.reduce_sum(dists ** 2, axis=-1)
    if use_l1:
        dists = tf.sqrt(dists+1e-12)
    val, idx = tf.nn.top_k(-dists, 5)
    val = val[:, :, 1:]  # remove the first one

    h = (2.0 / np.sqrt(numpoint)) ** 2  # note: immediately overwritten by the branch below
    if use_l1:
        h = np.sqrt(0.001) * 2
    else:
        h = 0.001
    print("h is ", h)

    val = tf.maximum(0.0, h + val)  # dd/np.sqrt(n)
    uniform_loss = tf.reduce_mean(val)

    return 20*uniform_loss
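
The loss above removes the component of each neighbour offset along the point's normal before measuring distance, so the repulsion acts only within the local tangent plane. A NumPy sketch of that projection (illustrative values only):

import numpy as np

normal = np.array([0.0, 0.0, 1.0])                      # unit surface normal at the centre point
offset = np.array([0.02, 0.01, 0.30])                   # neighbour offset, mostly along the normal
tangential = offset - np.dot(offset, normal) * normal   # drop the normal component, as dists does
print(np.sum(tangential ** 2))                          # the squared distance the loss actually uses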
Example 9
def sample_and_group(npoint,
                     radius,
                     nsample,
                     xyz,
                     points,
                     knn=False,
                     use_xyz=True):
    '''
    Input:
        npoint: int32
        radius: float32
        nsample: int32
        xyz: (batch_size, ndataset, 3) TF tensor
        points: (batch_size, ndataset, channel) TF tensor, if None will just use xyz as points
        knn: bool, if True use kNN instead of radius search
        use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
    Output:
        new_xyz: (batch_size, npoint, 3) TF tensor
        new_points: (batch_size, npoint, nsample, 3+channel) TF tensor
        idx: (batch_size, npoint, nsample) TF tensor, indices of local points as in ndataset points
        grouped_xyz: (batch_size, npoint, nsample, 3) TF tensor, normalized point XYZs
            (subtracted by seed point XYZ) in local regions
    '''
    '''
    input data: xyz, N x (d+C);
    a set of centroids: npoint (N') x d;
    neighbors per centroid: nsample (K) x (d+C).
    farthest_point_sample outputs the indices of the npoint sampled points;
    gather_point returns those npoint points' data, given the indices and the input data.
    '''
    # aaa = farthest_point_sample(npoint, xyz)
    new_xyz = gather_point(xyz, farthest_point_sample(
        npoint, xyz))  # (batch_size, npoint, 3)
    # print('xys:', new_xyz.get_shape())
    # print('new_xyz in s g:', new_xyz.get_shape(), 'npoint:', npoint)

    if knn:
        _, idx = knn_point(nsample, xyz, new_xyz)
    else:
        # K' is flexible, but at most nsample; not explicitly discussed in the paper
        idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)
    grouped_xyz = group_point(xyz, idx)  # (batch_size, npoint, nsample, 3)
    grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2),
                           [1, 1, nsample, 1])  # translation normalization
    if points is not None:
        grouped_points = group_point(
            points, idx)  # (batch_size, npoint, nsample, channel)
        if use_xyz:
            new_points = tf.concat(
                [grouped_xyz, grouped_points],
                axis=-1)  # (batch_size, npoint, nsample, 3+channel)
        else:
            new_points = grouped_points
    else:
        new_points = grouped_xyz

    return new_xyz, new_points, idx, grouped_xyz
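
The second docstring above spells out the two-step sampling: farthest_point_sample returns npoint indices into xyz, and gather_point pulls out the matching coordinates. A NumPy analogue of that indexing step (random indices stand in for the real FPS result; illustration only):

import numpy as np

xyz = np.random.rand(2, 1024, 3).astype(np.float32)                 # (batch, ndataset, 3)
idx = np.stack([np.random.choice(1024, 128, replace=False)
                for _ in range(2)]).astype(np.int64)                # stand-in for FPS indices
new_xyz = np.take_along_axis(xyz, idx[:, :, None], axis=1)          # what gather_point does
print(new_xyz.shape)                                                # (2, 128, 3)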
Example 10
def pointnet_sa_module_msg(xyz,
                           points,
                           npoint,
                           radius_list,
                           nsample_list,
                           mlp_list,
                           is_training,
                           bn_decay,
                           scope,
                           bn=True,
                           ibn=False,
                           use_xyz=True):
    ''' PointNet Set Abstraction (SA) module with Multi-Scale Grouping (MSG)
        Input:
            xyz: (batch_size, ndataset, 3) TF tensor
            points: (batch_size, ndataset, channel) TF tensor
            npoint: int32 -- #points sampled in farthest point sampling
            radius_list: list of float32 -- search radius in local region, one per scale
            nsample_list: list of int32 -- how many points in each local region, one per scale
            mlp_list: list of lists of int32 -- output sizes for the MLP on each point, one list per scale
            use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
        Return:
            new_xyz: (batch_size, npoint, 3) TF tensor
            new_points: (batch_size, npoint, \sum_k{mlp[k][-1]}) TF tensor
    '''
    with tf.variable_scope(scope) as sc:
        new_xyz = gather_point(xyz, farthest_point_sample(npoint, xyz))
        new_points_list = []
        for i in range(len(radius_list)):
            radius = radius_list[i]
            nsample = nsample_list[i]
            idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)
            grouped_xyz = group_point(xyz, idx)
            grouped_xyz -= tf.expand_dims(new_xyz, 2)
            if points is not None:
                grouped_points = group_point(points, idx)
                if use_xyz:
                    grouped_points = tf.concat([grouped_points, grouped_xyz],
                                               axis=-1)
            else:
                grouped_points = grouped_xyz
            for j, num_out_channel in enumerate(mlp_list[i]):
                grouped_points = tf_util2.conv2d(grouped_points,
                                                 num_out_channel, [1, 1],
                                                 padding='VALID',
                                                 stride=[1, 1],
                                                 bn=bn,
                                                 ibn=ibn,
                                                 is_training=is_training,
                                                 scope='conv%d_%d' % (i, j),
                                                 bn_decay=bn_decay)
            new_points = tf.reduce_max(grouped_points, axis=[2])
            new_points_list.append(new_points)
        new_points_concat = tf.concat(new_points_list, axis=-1)
        return new_xyz, new_points_concat
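
A minimal call sketch for the MSG set-abstraction module above, assuming the compiled grouping ops and tf_util2 are importable; radius_list, nsample_list and mlp_list must have one entry per scale (values below are illustrative only):

import tensorflow as tf

xyz = tf.placeholder(tf.float32, (16, 1024, 3))
points = tf.placeholder(tf.float32, (16, 1024, 64))
is_training = tf.placeholder(tf.bool, shape=())
new_xyz, new_points = pointnet_sa_module_msg(
    xyz, points, npoint=512,
    radius_list=[0.1, 0.2, 0.4], nsample_list=[16, 32, 64],
    mlp_list=[[32, 32, 64], [64, 64, 128], [64, 96, 128]],
    is_training=is_training, bn_decay=None, scope='sa_msg_1')
# new_points: (16, 512, 64 + 128 + 128)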
Example 11
def query_and_group_points(xyz,
                           points,
                           new_xyz,
                           nsample,
                           radius,
                           knn=False,
                           use_xyz=True,
                           normalize_radius=True,
                           orientations=None):

    if knn:
        _, idx = knn_point(nsample, xyz, new_xyz)
        pts_cnt = nsample  # Hack: assumes the number of input points is at least nsample
    else:
        idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)

    tf.summary.histogram('pts_cnt', pts_cnt)

    # Group XYZ coordinates
    grouped_xyz = group_point(xyz, idx)  # (batch_size, npoint, nsample, 3)
    grouped_xyz = grouped_xyz - tf.tile(tf.expand_dims(
        new_xyz, 2), [1, 1, nsample, 1])  # translation normalization
    if normalize_radius:
        grouped_xyz /= radius  # Scale normalization
    # 2D-rotate via orientations if necessary
    if orientations is not None:
        cosval = tf.expand_dims(tf.cos(orientations), axis=2)
        sinval = tf.expand_dims(tf.sin(orientations), axis=2)
        grouped_xyz = tf.stack([
            cosval * grouped_xyz[:, :, :, 0] +
            sinval * grouped_xyz[:, :, :, 1],
            -sinval * grouped_xyz[:, :, :, 0] +
            cosval * grouped_xyz[:, :, :, 1], grouped_xyz[:, :, :, 2]
        ],
                               axis=3)

    if points is not None:
        grouped_points = group_point(
            points, idx)  # (batch_size, npoint, nsample, channel)
        if use_xyz:

            new_points = tf.concat(
                [grouped_xyz, grouped_points],
                axis=-1)  # (batch_size, npoint, nsample, 3+channel)
        else:
            new_points = grouped_points
    else:
        new_points = grouped_xyz

    return new_points, idx
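
The orientations branch above rotates every local neighbourhood about the z-axis by its keypoint's angle, leaving the z coordinate unchanged. A NumPy check of the same per-point rotation (assumed values, illustration only):

import numpy as np

theta = 0.5                                      # orientation angle for one keypoint
p = np.array([1.0, 0.0, 0.3])                    # one grouped, centred neighbour offset
c, s = np.cos(theta), np.sin(theta)
rotated = np.array([c * p[0] + s * p[1],
                    -s * p[0] + c * p[1],
                    p[2]])                       # matches the tf.stack expression above
print(rotated)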
Example 12
def get_repulsion_loss4(pred, nsample=20, radius=0.07):
    # pred: (batch_size, npoint,3)
    idx, pts_cnt = query_ball_point(radius, nsample, pred, pred)
    tf.summary.histogram('smooth/unque_index', pts_cnt)

    grouped_pred = group_point(pred, idx)  # (batch_size, npoint, nsample, 3)
    grouped_pred -= tf.expand_dims(pred, 2)

    ##get the uniform loss
    h = 0.03
    dist_square = tf.reduce_sum(grouped_pred ** 2, axis=-1)
    dist_square, idx = tf.nn.top_k(-dist_square, 5)
    dist_square = -dist_square[:, :, 1:]  # remove the first one
    dist_square = tf.maximum(1e-12,dist_square)
    dist = tf.sqrt(dist_square)
    weight = tf.exp(-dist_square/h**2)
    uniform_loss = tf.reduce_mean(radius-dist*weight)
    return uniform_loss
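
Unlike the clamped variants, get_repulsion_loss4 weights each nearest-neighbour distance by a Gaussian kernel exp(-d^2 / h^2) and averages radius - d * weight over those neighbours. A quick NumPy evaluation of that term (illustrative values only):

import numpy as np

radius, h = 0.07, 0.03
d = np.array([0.005, 0.03, 0.06])       # distances to a few nearest neighbours
weight = np.exp(-d ** 2 / h ** 2)       # close to 1 for tiny d, decays quickly afterwards
print(np.mean(radius - d * weight))     # the quantity averaged by uniform_loss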
Example 13
def get_uniform_loss2(pred, nsample=20, radius=0.07, knn=False):
    # pred: (batch_size, npoint,3)
    if knn:
        with tf.device('/gpu:1'):
            _, idx = knn_point(nsample, pred, pred)
        pts_cnt = tf.constant(nsample, shape=(30, 1024))
    else:
        idx, pts_cnt = query_ball_point(radius, nsample, pred, pred)
    tf.summary.histogram('smooth/unque_index', pts_cnt)

    grouped_pred = group_point(pred, idx)  # (batch_size, npoint, nsample, 3)
    grouped_pred -= tf.expand_dims(pred, 2)

    ##get the uniform loss
    dists = tf.reduce_sum(grouped_pred**2, axis=-1)
    val, idx = tf.nn.top_k(-dists, 5)
    val = val[:, :, 1:]  # remove the first one
    uniform_loss = tf.reduce_mean(tf.exp(val / 0.03**2))
    return 0.2 * uniform_loss
Example 14
class GroupPointGradTest(tf.test.TestCase):  # assumed wrapper: the snippet shows only this method of a tf.test.TestCase
    def test_grad(self):
        with tf.device('/gpu:0'):
            points = tf.constant(
                np.random.random((1, 128, 16)).astype('float32'))
            print(points)
            xyz1 = tf.constant(np.random.random((1, 128, 3)).astype('float32'))
            xyz2 = tf.constant(np.random.random((1, 8, 3)).astype('float32'))
            radius = 0.3
            nsample = 32
            idx, pts_cnt = query_ball_point(radius, nsample, xyz1, xyz2)
            grouped_points = group_point(points, idx)
            print(grouped_points)

        with self.test_session():
            print("---- Going to compute gradient error")
            err = tf.test.compute_gradient_error(points, (1, 128, 16),
                                                 grouped_points,
                                                 (1, 8, 32, 16))
            print(err)
            self.assertLess(err, 1e-4)
Example 15
def get_uniform_loss2(
        pcd,
        percentages=[0.002, 0.004, 0.006, 0.008, 0.010, 0.012, 0.015],
        radius=1.0):
    B, N, C = pcd.get_shape().as_list()
    npoint = int(N * 0.05)
    loss = []
    for p in percentages:
        nsample = int(N * p)
        r = math.sqrt(p * radius)
        #print(npoint,nsample)
        new_xyz = gather_point(pcd, farthest_point_sample(
            npoint, pcd))  # (batch_size, npoint, 3)
        idx, pts_cnt = query_ball_point(
            r, nsample, pcd, new_xyz)  #(batch_size, npoint, nsample)

        uniform_val = tf.py_func(py_uniform_loss, [pcd, idx, pts_cnt, r],
                                 tf.float32)

        loss.append(uniform_val * math.sqrt(p * 100))
    return tf.add_n(loss) / len(percentages)
Example 16
def pointnet_sa_module_msg(xyz,
                           points,
                           npoint,
                           radius_list,
                           nsample_list,
                           mlp_list,
                           is_training,
                           bn_decay,
                           scope,
                           bn=True,
                           use_xyz=True,
                           use_nchw=False):
    ''' PointNet Set Abstraction (SA) module with Multi-Scale Grouping (MSG)
        Input:
            xyz: (batch_size, ndataset, 3) TF tensor
            points: (batch_size, ndataset, channel) TF tensor
            npoint: int32 -- #points sampled in farthest point sampling
            radius_list: list of float32 -- search radius in local region, one per scale
            nsample_list: list of int32 -- how many points in each local region, one per scale
            mlp_list: list of lists of int32 -- output sizes for the MLP on each point, one list per scale
            use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
            use_nchw: bool, if True, use NCHW data format for conv2d, which is usually faster than NHWC format
        Return:
            new_xyz: (batch_size, npoint, 3) TF tensor
            new_points: (batch_size, npoint, \sum_k{mlp[k][-1]}) TF tensor
    '''
    data_format = 'NCHW' if use_nchw else 'NHWC'
    with tf.variable_scope(scope) as sc:
        new_xyz = gather_point(xyz, farthest_point_sample(npoint, xyz))
        new_points_list = []
        for i in range(len(radius_list)):
            radius = radius_list[i]
            nsample = nsample_list[i]
            idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)
            grouped_xyz = group_point(xyz, idx)
            grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2),
                                   [1, 1, nsample, 1])
            if points is not None:
                grouped_points = group_point(points, idx)
                if use_xyz:
                    grouped_points = tf.concat([grouped_points, grouped_xyz],
                                               axis=-1)
            else:
                grouped_points = grouped_xyz
            if use_nchw:
                grouped_points = tf.transpose(grouped_points, [0, 3, 1, 2])
            for j, num_out_channel in enumerate(mlp_list[i]):
                ####################################
                grouped_points = Ops.xxlu(Ops.conv2d(grouped_points,
                                                     k=(1, 1),
                                                     out_c=num_out_channel,
                                                     str=1,
                                                     pad='VALID',
                                                     name='lll' + str(i)),
                                          label='lrelu')
                #grouped_points = tf_util.conv2d(grouped_points, num_out_channel, [1,1],
                #padding='VALID', stride=[1,1], bn=bn, is_training=is_training,scope='conv%d_%d'%(i,j), bn_decay=bn_decay)

            if use_nchw:
                grouped_points = tf.transpose(grouped_points, [0, 2, 3, 1])
            new_points = tf.reduce_max(grouped_points, axis=[2])
            new_points_list.append(new_points)
        new_points_concat = tf.concat(new_points_list, axis=-1)
        return new_xyz, new_points_concat
Example 17
def sample_and_group(npoint,
                     radius,
                     nsample,
                     xyz,
                     points,
                     tnet_spec=None,
                     knn=False,
                     use_xyz=True,
                     keypoints=None,
                     orientations=None,
                     normalize_radius=False):
    '''
    Input:
        npoint: int32
        radius: float32
        nsample: int32
        xyz: (batch_size, ndataset, 3) TF tensor
        points: (batch_size, ndataset, channel) TF tensor, if None will just use xyz as points
        tnet_spec: dict (keys: mlp, mlp2, is_training, bn_decay), if None do not apply tnet
        knn: bool, if True use kNN instead of radius search
        use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
        keypoints: None or tensor with shape [None, None, 3], containing the xyz of keypoints.
                   If provided, npoint will be ignored, and iterative furthest sampling will be skipped
    Output:
        new_xyz: (batch_size, npoint, 3) TF tensor, i.e. cluster center (dim=3)
        new_points: (batch_size, npoint, nsample, 3+channel) TF tensor (dim=3+c, first 3 dimensions are normalized XYZ)
        idx: (batch_size, npoint, nsample) TF tensor, indices of local points as in ndataset points
        grouped_xyz: (batch_size, npoint, nsample, 3) TF tensor, normalized point XYZs
            (subtracted by seed point XYZ) in local regions. This is usually the first 3 dimensions of new_points
    '''

    end_points = {}

    if keypoints is not None:
        new_xyz = keypoints
    else:
        new_xyz = gather_point(xyz, farthest_point_sample(
            npoint, xyz))  # (batch_size, npoint, 3)

    if knn:
        _, idx = knn_point(nsample, xyz, new_xyz)
        pts_cnt = nsample  # Hack: assumes the number of input points is at least nsample
    else:
        idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)

    grouped_xyz = group_point(xyz, idx)  # (batch_size, npoint, nsample, 3)
    grouped_xyz = grouped_xyz - tf.tile(tf.expand_dims(
        new_xyz, 2), [1, 1, nsample, 1])  # translation normalization
    if normalize_radius:
        grouped_xyz /= radius

    end_points['grouped_xyz_before'] = grouped_xyz

    # 2D-rotate via orientations if necessary
    if orientations is not None:
        cosval = tf.cos(orientations)
        sinval = tf.sin(orientations)
        one = tf.ones_like(cosval)
        zero = tf.zeros_like(cosval)
        R = tf.stack([(cosval, sinval, zero), (-sinval, cosval, zero),
                      (zero, zero, one)],
                     axis=0)
        R = tf.transpose(R, perm=[2, 3, 0, 1])
        grouped_xyz = tf.matmul(grouped_xyz, R)
        end_points['rotation'] = R

    if tnet_spec is not None:
        grouped_xyz = tnet(grouped_xyz, tnet_spec)
    if points is not None:
        grouped_points = group_point(
            points, idx)  # (batch_size, npoint, nsample, channel)
        if use_xyz:
            new_points = tf.concat(
                [grouped_xyz, grouped_points],
                axis=-1)  # (batch_size, npoint, nsample, 3+channel)
        else:
            new_points = grouped_points
    else:
        new_points = grouped_xyz

    end_points['grouped_xyz'] = grouped_xyz

    return new_xyz, new_points, idx, grouped_xyz, end_points
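
A minimal call sketch for the keypoint variant above: when keypoints is given, farthest point sampling is skipped and the provided coordinates become the cluster centres (shapes below are assumed, for illustration only):

import tensorflow as tf

xyz = tf.placeholder(tf.float32, (4, 2048, 3))
keypts = tf.placeholder(tf.float32, (4, 256, 3))        # precomputed keypoint coordinates
orient = tf.placeholder(tf.float32, (4, 256))            # one in-plane angle per keypoint
new_xyz, new_points, idx, grouped_xyz, end_points = sample_and_group(
    npoint=None, radius=0.3, nsample=64, xyz=xyz, points=None,
    keypoints=keypts, orientations=orient, normalize_radius=True)
# new_xyz is keypts itself; grouped_xyz is rotated into each keypoint's local frame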
Example 18
def get_smooth_loss(pred,
                    normal,
                    nsample=20,
                    radius=0.05,
                    knn=False,
                    selected=True,
                    re_weight=False,
                    grouping=None):
    # pred: (batch_size, npoint,3)
    # normal : (batch_size,npoint,3)
    if selected:
        radius = 1.0 * radius
        nsample = int(1.0 * nsample)
    # first get some neighborhood points
    if knn:
        with tf.device('/gpu:1'):
            _, idx = knn_point(nsample, pred, pred)
        pts_cnt = tf.constant(nsample, shape=(30, 1024))
    else:
        idx, pts_cnt = query_ball_point(radius, nsample, pred, pred)
    tf.summary.histogram('smooth/unque_index', pts_cnt)

    grouped_pred = group_point(pred, idx)  # (batch_size, npoint, nsample, 3)
    grouped_pred -= tf.expand_dims(pred, 2)
    dists = tf.reduce_sum(grouped_pred**2, axis=-1)
    val, idx = tf.nn.top_k(-dists, 5)
    idx = idx[:, :, 1:]
    grouped_pred = group_point(pred, idx)  # (batch_size, npoint, nsample, 3)
    grouped_pred -= tf.expand_dims(pred, 2)

    grouped_pred_normalized = tf.nn.l2_normalize(grouped_pred, dim=-1)
    inner_product = tf.abs(
        tf.reduce_sum(grouped_pred_normalized * tf.expand_dims(normal, axis=2),
                      axis=-1))  # (batch_size, npoint, nsample)
    if re_weight:
        alpha = 5
        inner_product = (tf.exp(alpha * inner_product) - 1) / (
            np.exp(alpha) - 1)  # (batch_size, npoint,nsample)
    if grouping == 'exp_weighted':
        epsilon = 1e-12
        dists = tf.norm(grouped_pred + epsilon,
                        axis=-1)  # (batch_size, npoint, nsample)
        dists = tf.maximum(dists, 1e-10)  # (batch_size, npoint,nsample)
        exp_dists = tf.exp(-dists * 20)  # (batch_size, npoint,nsample)
        weights = exp_dists / tf.reduce_sum(
            exp_dists, axis=2, keep_dims=True)  # (batch_size, npoint, nsample)
        tf.summary.histogram('smooth/weighted', weights)
        inner_product = weights * inner_product

    if selected:
        grouped_normal = group_point(normal, idx)
        mask = tf.to_float(
            tf.greater(
                tf.reduce_sum(grouped_normal * tf.expand_dims(normal, axis=2),
                              axis=-1), 0.0))
        tf.summary.histogram('smooth/mask1', tf.count_nonzero(mask, axis=-1))
        smooth_loss = tf.reduce_sum(mask * inner_product) / tf.reduce_sum(mask)
    else:
        smooth_loss = tf.reduce_mean(inner_product)

    return smooth_loss
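
The smooth loss above normalises each neighbour offset and projects it onto the point's normal, so neighbourhoods lying in the local tangent plane contribute almost nothing. A small NumPy illustration of that projection (illustrative values only):

import numpy as np

normal = np.array([0.0, 0.0, 1.0])                          # local surface normal
offsets = np.array([[0.10, 0.00, 0.00],                     # in-plane neighbour -> near-zero penalty
                    [0.00, 0.10, 0.05]])                     # slightly off-plane neighbour
offsets = offsets / np.linalg.norm(offsets, axis=-1, keepdims=True)
penalty = np.abs(offsets @ normal)                           # |cosine| between offset and normal
print(penalty.mean())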