def sample_and_group(npoint, radius, nsample, xyz, points, knn=False, use_xyz=True):
    '''
    Input:
        npoint: int32
        radius: float32
        nsample: int32
        xyz: (batch_size, ndataset, 3) TF tensor
        points: (batch_size, ndataset, channel) TF tensor, if None will just use xyz as points
        knn: bool, if True use kNN instead of radius search
        use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
    Output:
        new_xyz: (batch_size, npoint, 3) TF tensor
        new_points: (batch_size, npoint, nsample, 3+channel) TF tensor
        idx: (batch_size, npoint, nsample) TF tensor, indices of local points as in ndataset points
        grouped_xyz: (batch_size, npoint, nsample, 3) TF tensor, normalized point XYZs
            (subtracted by seed point XYZ) in local regions
    '''

    new_xyz = gather_point(xyz, farthest_point_sample(npoint, xyz))  # (batch_size, npoint, 3)
    if knn:
        _, idx = knn_point(nsample, xyz, new_xyz)
    else:
        idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)
    grouped_xyz = group_point(xyz, idx)  # (batch_size, npoint, nsample, 3)
    grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2), [1, 1, nsample, 1])  # translation normalization
    if points is not None:
        grouped_points = group_point(points, idx)  # (batch_size, npoint, nsample, channel)
        if use_xyz:
            new_points = tf.concat([grouped_xyz, grouped_points], axis=-1)  # (batch_size, npoint, nsample, 3+channel)
        else:
            new_points = grouped_points
    else:
        new_points = grouped_xyz

    return new_xyz, new_points, idx, grouped_xyz
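
A minimal usage sketch for sample_and_group (hypothetical shapes and placeholder names; it assumes the compiled PointNet++ point ops farthest_point_sample, query_ball_point and group_point are importable in a TF1 graph):

# Illustrative only: 8 clouds of 1024 points with 64-d per-point features.
xyz_in = tf.placeholder(tf.float32, (8, 1024, 3))
feats_in = tf.placeholder(tf.float32, (8, 1024, 64))
new_xyz, new_points, idx, grouped_xyz = sample_and_group(
    npoint=256, radius=0.2, nsample=32, xyz=xyz_in, points=feats_in)
# new_xyz:    (8, 256, 3)      sampled centroids
# new_points: (8, 256, 32, 67) local XYZ offsets concatenated with features
# idx:        (8, 256, 32)     neighbour indices into the 1024 input points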
Example #2
def set_abstraction_msg(xyz, points, npoint, radius_list, nsample_list,
                        mlp_list, is_training, use_nchw):
    new_xyz = gather_point(xyz, farthest_point_sample(npoint, xyz))
    new_points_list = []
    for i in range(len(radius_list)):
        radius = radius_list[i]
        nsample = nsample_list[i]
        group_idx, _ = query_ball_point(radius, nsample, xyz, new_xyz)
        grouped_xyz = group_point(xyz, group_idx)
        grouped_xyz -= K.tile(
            Lambda(lambda x: K.expand_dims(x, axis=2))(new_xyz),
            [1, 1, nsample, 1])
        if points is not None:
            grouped_points = group_point(points, group_idx)
            grouped_points = Lambda(lambda x: K.concatenate(x, axis=-1))(
                [grouped_points, grouped_xyz])
        else:
            grouped_points = grouped_xyz
        if use_nchw:
            grouped_points = Lambda(lambda x: K.permute_dimensions(
                x, [0, 3, 1, 2]))(grouped_points)
        for j, num_out_channel in enumerate(mlp_list[i]):
            grouped_points = Conv2D(num_out_channel, 1,
                                    activation="relu")(grouped_points)
            grouped_points = BatchNormalization()(grouped_points,
                                                  training=is_training)
        if use_nchw:
            grouped_points = Lambda(lambda x: K.permute_dimensions(
                x, [0, 2, 3, 1]))(grouped_points)
        new_points = Lambda(lambda x: K.max(x, axis=2))(grouped_points)
        new_points_list.append(new_points)
    new_points_concat = Lambda(lambda x: K.concatenate(x, axis=-1))(
        new_points_list)
    return new_xyz, new_points_concat
Example #3
def sample_and_group(npoint,
                     radius,
                     nsample,
                     xyz,
                     points,
                     knn=False,
                     use_xyz=True):
    '''
    Input:
        npoint: int32
        radius: float32
        nsample: int32
        xyz: (batch_size, ndataset, 3) TF tensor
        points: (batch_size, ndataset, channel) TF tensor, if None will just use xyz as points
        knn: bool, if True use kNN instead of radius search
        use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
    Output:
        new_xyz: (batch_size, npoint, 3) TF tensor
        new_points: (batch_size, npoint, nsample, 3+channel) TF tensor
        idx: (batch_size, npoint, nsample) TF tensor, indices of local points as in ndataset points
        grouped_xyz: (batch_size, npoint, nsample, 3) TF tensor, normalized point XYZs
            (subtracted by seed point XYZ) in local regions
    '''
    '''
    Input data xyz: N points of dimension d+C.
    Centroid set: npoint (N') points of dimension d.
    Neighbourhoods: nsample (K) points of dimension d+C per centroid.
    farthest_point_sample returns the indices of the npoint sampled points;
    gather_point then gathers those points' data from the input according to the indices.
    '''
    # aaa = farthest_point_sample(npoint, xyz)
    new_xyz = gather_point(xyz, farthest_point_sample(
        npoint, xyz))  # (batch_size, npoint, 3)
    # print('xys:', new_xyz.get_shape())
    # print('new_xyz in s g:', new_xyz.get_shape(), 'npoint:', npoint)

    if knn:
        _, idx = knn_point(nsample, xyz, new_xyz)
    else:
        # the actual neighbour count K' is flexible (at most nsample); not specified in the paper
        idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)
    grouped_xyz = group_point(xyz, idx)  # (batch_size, npoint, nsample, 3)
    grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2),
                           [1, 1, nsample, 1])  # translation normalization
    if points is not None:
        grouped_points = group_point(
            points, idx)  # (batch_size, npoint, nsample, channel)
        if use_xyz:
            new_points = tf.concat(
                [grouped_xyz, grouped_points],
                axis=-1)  # (batch_size, npoint, nsample, 3+channel)
        else:
            new_points = grouped_points
    else:
        new_points = grouped_xyz

    return new_xyz, new_points, idx, grouped_xyz
Example #4
def pointnet_sa_module_msg(xyz,
                           points,
                           npoint,
                           radius_list,
                           nsample_list,
                           mlp_list,
                           is_training,
                           bn_decay,
                           scope,
                           bn=True,
                           ibn=False,
                           use_xyz=True):
    ''' PointNet Set Abstraction (SA) module with Multi-Scale Grouping (MSG)
        Input:
            xyz: (batch_size, ndataset, 3) TF tensor
            points: (batch_size, ndataset, channel) TF tensor
            npoint: int32 -- #points sampled in farthest point sampling
            radius: list of float32 -- search radius in local region
            nsample: list of int32 -- how many points in each local region
            mlp: list of list of int32 -- output size for MLP on each point
            use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
        Return:
            new_xyz: (batch_size, npoint, 3) TF tensor
            new_points: (batch_size, npoint, \sum_k{mlp[k][-1]}) TF tensor
    '''
    with tf.variable_scope(scope) as sc:
        new_xyz = gather_point(xyz, farthest_point_sample(npoint, xyz))
        new_points_list = []
        for i in range(len(radius_list)):
            radius = radius_list[i]
            nsample = nsample_list[i]
            idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)
            grouped_xyz = group_point(xyz, idx)
            grouped_xyz -= tf.expand_dims(new_xyz, 2)
            if points is not None:
                grouped_points = group_point(points, idx)
                if use_xyz:
                    grouped_points = tf.concat([grouped_points, grouped_xyz],
                                               axis=-1)
            else:
                grouped_points = grouped_xyz
            for j, num_out_channel in enumerate(mlp_list[i]):
                grouped_points = tf_util2.conv2d(grouped_points,
                                                 num_out_channel, [1, 1],
                                                 padding='VALID',
                                                 stride=[1, 1],
                                                 bn=bn,
                                                 ibn=ibn,
                                                 is_training=is_training,
                                                 scope='conv%d_%d' % (i, j),
                                                 bn_decay=bn_decay)
            new_points = tf.reduce_max(grouped_points, axis=[2])
            new_points_list.append(new_points)
        new_points_concat = tf.concat(new_points_list, axis=-1)
        return new_xyz, new_points_concat
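
A hypothetical call of pointnet_sa_module_msg with two grouping scales (is_training_pl, bn_decay and the layer sizes are illustrative placeholders, not taken from the original code):

new_xyz, new_points = pointnet_sa_module_msg(
    xyz, points, npoint=512,
    radius_list=[0.1, 0.2], nsample_list=[16, 32],
    mlp_list=[[32, 32, 64], [64, 64, 128]],
    is_training=is_training_pl, bn_decay=bn_decay, scope='sa1_msg')
# new_points: (batch_size, 512, 64 + 128), one max-pooled feature per scale, concatenated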
Example #5
def subsample(points, feat, targetnum, kp_idx):
    if kp_idx is not None:
        kp_indices = kp_idx
    else:
        kp_indices = farthest_point_sample(targetnum, points)
        kp_indices = tf.expand_dims(kp_indices, 2)
    feat_sampled = group_point(feat, kp_indices)
    feat_sampled = tf.squeeze(feat_sampled, 2)
    xyz_sampled = group_point(points, kp_indices)
    xyz_sampled = tf.squeeze(xyz_sampled, 2)
    return xyz_sampled, feat_sampled, kp_indices
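
A small usage sketch for subsample (shapes illustrative; passing kp_idx=None triggers farthest point sampling):

xyz_s, feat_s, kp_idx = subsample(points=xyz, feat=feats, targetnum=1024, kp_idx=None)
# xyz_s: (B, 1024, 3), feat_s: (B, 1024, C), kp_idx: (B, 1024, 1) sampled indices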
Example #6
def query_and_group_points(xyz,
                           points,
                           new_xyz,
                           nsample,
                           radius,
                           knn=False,
                           use_xyz=True,
                           normalize_radius=True,
                           orientations=None):

    if knn:
        _, idx = knn_point(nsample, xyz, new_xyz)
        pts_cnt = nsample  # Hack: assumes the input has at least nsample points per query
    else:
        idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)

    tf.summary.histogram('pts_cnt', pts_cnt)

    # Group XYZ coordinates
    grouped_xyz = group_point(xyz, idx)  # (batch_size, npoint, nsample, 3)
    grouped_xyz = grouped_xyz - tf.tile(tf.expand_dims(
        new_xyz, 2), [1, 1, nsample, 1])  # translation normalization
    if normalize_radius:
        grouped_xyz /= radius  # Scale normalization
    # 2D-rotate via orientations if necessary
    if orientations is not None:
        cosval = tf.expand_dims(tf.cos(orientations), axis=2)
        sinval = tf.expand_dims(tf.sin(orientations), axis=2)
        grouped_xyz = tf.stack([
            cosval * grouped_xyz[:, :, :, 0] +
            sinval * grouped_xyz[:, :, :, 1],
            -sinval * grouped_xyz[:, :, :, 0] +
            cosval * grouped_xyz[:, :, :, 1], grouped_xyz[:, :, :, 2]
        ],
                               axis=3)

    if points is not None:
        grouped_points = group_point(
            points, idx)  # (batch_size, npoint, nsample, channel)
        if use_xyz:

            new_points = tf.concat(
                [grouped_xyz, grouped_points],
                axis=-1)  # (batch_size, npoint, nsample, 3+channel)
        else:
            new_points = grouped_points
    else:
        new_points = grouped_xyz

    return new_points, idx
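
A hypothetical call of query_and_group_points where each seed patch is additionally rotated into a canonical 2D heading (new_xyz and theta are assumed precomputed seed positions and per-seed angles of shape (B, npoint); they are not defined in the snippet above):

new_points, idx = query_and_group_points(
    xyz, feats, new_xyz, nsample=32, radius=0.3,
    normalize_radius=True, orientations=theta)
# new_points: (B, npoint, 32, 3 + C), XYZ offsets scaled by 1/radius and rotated by theta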
Example #7
def sample_and_group(npoint, radius, nsample, xyz, points, tnet_spec=None, knn=False, use_xyz=True):
    '''
    Input:
        npoint: int32
        radius: float32
        nsample: int32
        xyz: (batch_size, ndataset, 3) TF tensor
        points: (batch_size, ndataset, channel) TF tensor, if None will just use xyz as points
        tnet_spec: dict (keys: mlp, mlp2, is_training, bn_decay), if None do not apply tnet
        knn: bool, if True use kNN instead of radius search
        use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
    Output:
        new_xyz: (batch_size, npoint, 3) TF tensor
        new_points: (batch_size, npoint, nsample, 3+channel) TF tensor
        idx: (batch_size, npoint, nsample) TF tensor, indices of local points as in ndataset points
        grouped_xyz: (batch_size, npoint, nsample, 3) TF tensor, normalized point XYZs
            (subtracted by seed point XYZ) in local regions
    '''

    new_xyz = gather_point(xyz, farthest_point_sample(npoint, xyz)) # (batch_size, npoint, 3)
    if knn:
        _,idx = knn_point(nsample, xyz, new_xyz)
    else:
        if np.isscalar(radius):
            idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)
            tf.summary.histogram('pts_cnt', pts_cnt)
        else:
            idx_list = []
            for radius_one, xyz_one, new_xyz_one in zip(tf.unstack(radius,axis=0), tf.unstack(xyz, axis=0),tf.unstack(new_xyz, axis=0)):
                idx_one, pts_cnt = query_ball_point(radius_one, nsample, tf.expand_dims(xyz_one, axis=0), tf.expand_dims(new_xyz_one, axis=0))
                idx_list.append(idx_one)
            idx = tf.stack(idx_list, axis=0)
            idx = tf.squeeze(idx, axis=1)

    grouped_xyz = group_point(xyz, idx) # (batch_size, npoint, nsample, 3)
    grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2), [1,1,nsample,1]) # translation normalization
    if tnet_spec is not None:
        grouped_xyz = tnet(grouped_xyz, tnet_spec)
    if points is not None:
        grouped_points = group_point(points, idx) # (batch_size, npoint, nsample, channel)
        if use_xyz:
            # new_points = tf.concat([grouped_xyz, tf.tile(tf.expand_dims(new_xyz, 2), [1,1,nsample,1]),grouped_points], axis=-1) # (batch_size, npoint, nample, 3+channel)
            new_points = tf.concat([grouped_xyz, grouped_points], axis=-1)  # (batch_size, npoint, nsample, 3+channel)
        else:
            new_points = grouped_points
    else:
        # new_points =  tf.concat([grouped_xyz, tf.tile(tf.expand_dims(new_xyz, 2), [1,1,nsample,1])], axis=-1)
        new_points = grouped_xyz

    return new_xyz, new_points, idx, grouped_xyz
Example #8
def sample_and_group(npoint,
                     radius,
                     nsample,
                     xyz,
                     points,
                     normals,
                     tnet_spec=None,
                     knn=False,
                     use_xyz=True):
    '''
    Input:
        npoint: int32
        radius: float32
        nsample: int32
        xyz: (batch_size, ndataset, 3) TF tensor
        points: (batch_size, ndataset, channel) TF tensor, if None will just use xyz as points
        normals: (batch_size, ndataset, 3) TF tensor of per-point normals
        tnet_spec: dict (keys: mlp, mlp2, is_training, bn_decay), if None do not apply tnet
        knn: bool, if True use kNN instead of radius search
        use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
    Output:
        new_xyz: (batch_size, npoint, 3) TF tensor
        new_normals: (batch_size, npoint, 3) TF tensor, normals of the sampled points
        new_points: (batch_size, npoint, nsample, 3+channel) TF tensor
        idx: (batch_size, npoint, nsample) TF tensor, indices of local points as in ndataset points
        grouped_xyz: (batch_size, npoint, nsample, 3) TF tensor, normalized point XYZs
            (subtracted by seed point XYZ) in local regions
    '''

    indecies = farthest_point_sample(npoint, xyz)
    new_xyz = gather_point(xyz, indecies)  # (batch_size, npoint, 3)
    new_normals = gather_point(normals, indecies)  # (batch_size, npoint, 3)
    _, idx = knn_point(nsample, xyz, new_xyz)
    grouped_xyz = group_point(xyz, idx)  # (batch_size, npoint, nsample, 3)
    grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2),
                           [1, 1, nsample, 1])  # translation normalization
    if tnet_spec is not None:
        grouped_xyz = tnet(grouped_xyz, tnet_spec)
    if points is not None:
        grouped_points = group_point(
            points, idx)  # (batch_size, npoint, nsample, channel)
        if use_xyz:
            new_points = tf.concat(
                [grouped_xyz, grouped_points],
                axis=-1)  # (batch_size, npoint, nsample, 3+channel)
        else:
            new_points = grouped_points
    else:
        new_points = grouped_xyz

    return new_xyz, new_points, new_normals, idx, grouped_xyz
Example #9
def get_smooth_and_uniform_loss(pred,
                                normal,
                                nsample=20,
                                radius=0.07,
                                knn=False):
    # pred: (batch_size, npoint,3)
    if knn:
        with tf.device('/gpu:1'):
            _, idx = knn_point(nsample, pred, pred)
        pts_cnt = tf.constant(nsample, shape=(30, 1024))
    else:
        idx, pts_cnt = query_ball_point(radius, nsample, pred, pred)
    tf.summary.histogram('smooth/unque_index', pts_cnt)

    grouped_pred = group_point(pred, idx)  # (batch_size, npoint, nsample, 3)
    grouped_pred -= tf.expand_dims(pred, 2)

    ##get the uniform loss
    dists = tf.reduce_sum(grouped_pred**2, axis=-1)
    val, idx = tf.nn.top_k(-dists, 5)
    val = val[:, :, 1:]  # remove the first one
    val = tf.maximum(0.0, 0.001 + val)
    uniform_loss = tf.reduce_mean(val)

    # idx = idx[:, :, 1:]  # (batch_size, npoint, 4)
    # batch_size = pred.get_shape()[0].value
    # nPoints = pred.get_shape()[1].value
    # grouped_pred_reshape = tf.reshape(grouped_pred, (-1, 3))
    # indics = tf.reshape(tf.range(batch_size*nPoints), (batch_size*nPoints, 1)) * nsample + tf.reshape(idx,[batch_size*nPoints,-1])
    # grouped_pred = tf.gather(grouped_pred_reshape, indics)
    # grouped_pred = tf.reshape(grouped_pred,(batch_size,nPoints,4,-1))
    # grouped_pred = tf.nn.l2_normalize(grouped_pred, dim=-1)
    # inner_product = tf.abs(tf.reduce_sum(grouped_pred * tf.expand_dims(normal, axis=2), axis=-1))  # (batch_size, npoint,nsample)
    # smooth_loss = tf.reduce_mean(inner_product)
    return uniform_loss, 0
Example #10
def get_perulsion_loss1_orthdistance(pred, normal, nsample=15, radius=0.07, knn=False, numpoint=4096,use_l1=False):
    # pred: (batch_size, npoint,3)
    if knn:
        with tf.device('/gpu:1'):
            _, idx = knn_point(nsample, pred, pred)
        pts_cnt = tf.constant(nsample, shape=(30, 1024))
    else:
        idx, pts_cnt = query_ball_point(radius, nsample, pred, pred)
    tf.summary.histogram('smooth/unque_index', pts_cnt)

    grouped_pred = group_point(pred, idx)  # (batch_size, npoint, nsample, 3)
    offset = grouped_pred-tf.expand_dims(pred, 2)
    normal = tf.expand_dims(normal,axis=2)
    dists = offset -tf.reduce_sum(normal*offset,axis=-1,keep_dims=True)*normal

    dists = tf.reduce_sum(dists ** 2, axis=-1)
    if use_l1:
        dists = tf.sqrt(dists+1e-12)
    val, idx = tf.nn.top_k(-dists, 5)
    val = val[:, :, 1:]  # remove the first one

    h = (2.0 / np.sqrt(numpoint)) ** 2
    if use_l1:
        h = np.sqrt(0.001)*2
    else:
        h = 0.001
    print "h is ", h

    val = tf.maximum(0.0, h + val)  # dd/np.sqrt(n)
    uniform_loss = tf.reduce_mean(val)

    return 20*uniform_loss
Example #11
def get_perulsion_loss(pred, nsample=15, radius=0.07, knn=False, numpoint=4096, use_l1=False):
    # pred: (batch_size, npoint,3)
    if knn:
        with tf.device('/gpu:1'):
            _, idx = knn_point(nsample, pred, pred)
        pts_cnt = tf.constant(nsample, shape=(30, 1024))
    else:
        idx, pts_cnt = query_ball_point(radius, nsample, pred, pred)
    tf.summary.histogram('smooth/unque_index', pts_cnt)

    grouped_pred = group_point(pred, idx)  # (batch_size, npoint, nsample, 3)
    grouped_pred -= tf.expand_dims(pred, 2)

    ##get the uniform loss
    dists = tf.reduce_sum(grouped_pred ** 2, axis=-1)
    if use_l1:
        dists = tf.sqrt(dists+1e-12)
    val, idx = tf.nn.top_k(-dists, 5)
    val = val[:, :, 1:]  # remove the first one

    if use_l1:
        h = np.sqrt(0.001)*2
    else:
        h = 0.001
    print "h is ",h
    val = tf.maximum(0.0, h + val) # dd/np.sqrt(n)
    perulsion_loss = tf.reduce_mean(val)
    return perulsion_loss
Example #12
def get_uniform_loss(pcd,
                     percentages=[0.004, 0.006, 0.008, 0.010, 0.012],
                     radius=1.0):
    B, N, C = pcd.get_shape().as_list()
    npoint = int(N * 0.05)
    loss = []
    for p in percentages:
        nsample = int(N * p)
        r = math.sqrt(p * radius)
        disk_area = math.pi * (radius**2) * p / nsample
        #print(npoint,nsample)
        new_xyz = gather_point(pcd, farthest_point_sample(
            npoint, pcd))  # (batch_size, npoint, 3)
        idx, pts_cnt = query_ball_point(
            r, nsample, pcd, new_xyz)  #(batch_size, npoint, nsample)

        #expect_len =  tf.sqrt(2*disk_area/1.732)#using hexagon
        expect_len = tf.sqrt(disk_area)  # using square

        grouped_pcd = group_point(pcd, idx)
        grouped_pcd = tf.concat(tf.unstack(grouped_pcd, axis=1), axis=0)

        var, _ = knn_point(2, grouped_pcd, grouped_pcd)
        uniform_dis = -var[:, :, 1:]
        uniform_dis = tf.sqrt(tf.abs(uniform_dis + 1e-8))
        uniform_dis = tf.reduce_mean(uniform_dis, axis=[-1])
        uniform_dis = tf.square(uniform_dis - expect_len) / (expect_len + 1e-8)
        uniform_dis = tf.reshape(uniform_dis, [-1])

        mean, variance = tf.nn.moments(uniform_dis, axes=0)
        mean = mean * math.pow(p * 100, 2)
        #nothing 4
        loss.append(mean)
    return tf.add_n(loss) / len(percentages)
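
A sketch of how this uniformity term is typically combined with a reconstruction loss (pcd is assumed to be an upsampled cloud normalized to the unit sphere; recon_loss and the 10.0 weight are illustrative placeholders):

uniform_loss = get_uniform_loss(pcd, percentages=[0.004, 0.008, 0.012], radius=1.0)
total_loss = recon_loss + 10.0 * uniform_loss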
Example #13
def get_repulsion_loss(pred,
                       nsample=20,
                       radius=0.07,
                       knn=False,
                       use_l1=False,
                       h=0.001):

    if knn:
        _, idx = knn_point_2(nsample, pred, pred)
        pts_cnt = tf.constant(nsample, shape=(30, 1024))
    else:
        idx, pts_cnt = query_ball_point(radius, nsample, pred, pred)
    tf.summary.histogram('smooth/unque_index', pts_cnt)

    grouped_pred = group_point(pred, idx)  # (batch_size, npoint, nsample, 3)
    grouped_pred -= tf.expand_dims(pred, 2)

    # get the uniform loss
    if use_l1:
        dists = tf.reduce_sum(tf.abs(grouped_pred), axis=-1)
    else:
        dists = tf.reduce_sum(grouped_pred**2, axis=-1)

    val, idx = tf.nn.top_k(-dists, 5)
    val = val[:, :, 1:]  # remove the first one

    if use_l1:
        h = np.sqrt(h) * 2
    print(("h is ", h))

    val = tf.maximum(0.0, h + val)  # dd/np.sqrt(n)
    repulsion_loss = tf.reduce_mean(val)
    return repulsion_loss
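
A usage sketch for get_repulsion_loss: it penalizes, for each predicted point, the 4 nearest neighbours whose (squared) distance falls below h, pushing points apart (cd_loss and the 0.02 weight are illustrative placeholders):

rep_loss = get_repulsion_loss(pred, nsample=20, radius=0.07, h=0.03)
total_loss = cd_loss + 0.02 * rep_loss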
Example #14
def get_pca_loss(pred_edge):
    nsample = 10
    _, idx = knn_point(nsample, pred_edge, pred_edge)
    grouped_pred = group_point(pred_edge, idx)  # (batch_size, npoint, nsample, 3)
    W = tf.get_variable('pca', shape=(3, 1))

    return
Example #15
def group(xyz, points, k, dilation=1, use_xyz=False):
    _, idx = knn_point(k * dilation + 1, xyz, xyz)
    idx = idx[:, :, 1::dilation]

    grouped_xyz = group_point(xyz, idx)  # (batch_size, npoint, k, 3)
    grouped_xyz -= tf.expand_dims(xyz, 2)  # translation normalization
    if points is not None:
        grouped_points = group_point(points,
                                     idx)  # (batch_size, npoint, k, channel)
        if use_xyz:
            grouped_points = tf.concat(
                [grouped_xyz, grouped_points],
                axis=-1)  # (batch_size, npoint, k, 3+channel)
    else:
        grouped_points = grouped_xyz

    return grouped_xyz, grouped_points, idx
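
A sketch of the dilated kNN grouping above: with k=8 and dilation=2, the 17 nearest neighbours (including the point itself) are searched and every second one kept, enlarging the receptive field without increasing k (shapes illustrative):

grouped_xyz, grouped_feats, idx = group(xyz, feats, k=8, dilation=2)
# grouped_xyz: (B, N, 8, 3) centred offsets, grouped_feats: (B, N, 8, C)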
Example #16
def get_repulsion_loss4(pred, nsample=20, radius=0.07):
    # pred: (batch_size, npoint,3)
    idx, pts_cnt = query_ball_point(radius, nsample, pred, pred)
    tf.summary.histogram('smooth/unque_index', pts_cnt)

    grouped_pred = group_point(pred, idx)  # (batch_size, npoint, nsample, 3)
    grouped_pred -= tf.expand_dims(pred, 2)

    ##get the uniform loss
    h = 0.03
    dist_square = tf.reduce_sum(grouped_pred ** 2, axis=-1)
    dist_square, idx = tf.nn.top_k(-dist_square, 5)
    dist_square = -dist_square[:, :, 1:]  # remove the first one
    dist_square = tf.maximum(1e-12,dist_square)
    dist = tf.sqrt(dist_square)
    weight = tf.exp(-dist_square/h**2)
    uniform_loss = tf.reduce_mean(radius-dist*weight)
    return uniform_loss
Example #17
def get_uniform_loss2(pred, nsample=20, radius=0.07, knn=False):
    # pred: (batch_size, npoint,3)
    if knn:
        with tf.device('/gpu:1'):
            _, idx = knn_point(nsample, pred, pred)
        pts_cnt = tf.constant(nsample, shape=(30, 1024))
    else:
        idx, pts_cnt = query_ball_point(radius, nsample, pred, pred)
    tf.summary.histogram('smooth/unque_index', pts_cnt)

    grouped_pred = group_point(pred, idx)  # (batch_size, npoint, nsample, 3)
    grouped_pred -= tf.expand_dims(pred, 2)

    ##get the uniform loss
    dists = tf.reduce_sum(grouped_pred**2, axis=-1)
    val, idx = tf.nn.top_k(-dists, 5)
    val = val[:, :, 1:]  # remove the first one
    uniform_loss = tf.reduce_mean(tf.exp(val / 0.03**2))
    return 0.2 * uniform_loss
Example #18
def res_gcn_d(xyz,
              points,
              k,
              n_cout,
              n_blocks,
              is_training,
              scope,
              bn_decay=None,
              use_bn=False,
              use_ibn=False,
              indices=None):
    with tf.variable_scope(scope):
        for idx in range(n_blocks):
            with tf.variable_scope('block_{}'.format(idx)):
                shortcut = points

                # Center Features
                points = batch_norm(points,
                                    is_training,
                                    'bn_center',
                                    bn_decay=bn_decay,
                                    use_bn=use_bn,
                                    use_ibn=use_ibn)
                points = tf.nn.leaky_relu(points)
                # Neighbor Features
                if idx == 0 and indices is None:
                    _, grouped_points, indices = group(xyz, points, k)
                else:
                    grouped_points = group_point(points, indices)
                # Center Conv
                center_points = tf.expand_dims(points, axis=2)
                points = conv2d(center_points, n_cout, name='conv_center')
                # Neighbor Conv
                grouped_points_nn = conv2d(grouped_points,
                                           n_cout,
                                           name='conv_neighbor')
                # CNN
                points = tf.reduce_mean(tf.concat([points, grouped_points_nn],
                                                  axis=2),
                                        axis=2) + shortcut

    return points
Example #19
    def test_grad(self):
        with tf.device('/gpu:0'):
            points = tf.constant(
                np.random.random((1, 128, 16)).astype('float32'))
            print(points)
            xyz1 = tf.constant(np.random.random((1, 128, 3)).astype('float32'))
            xyz2 = tf.constant(np.random.random((1, 8, 3)).astype('float32'))
            radius = 0.3
            nsample = 32
            idx, pts_cnt = query_ball_point(radius, nsample, xyz1, xyz2)
            grouped_points = group_point(points, idx)
            print(grouped_points)

        with self.test_session():
            print("---- Going to compute gradient error")
            err = tf.test.compute_gradient_error(points, (1, 128, 16),
                                                 grouped_points,
                                                 (1, 8, 32, 16))
            print(err)
            self.assertLess(err, 1e-4)
Example #20
def get_uniform_loss_extra(pred, nPoints, up_ratio, radius):
    # pred: (batch_size, npoint,3)
    # normal : (batch_size,npoint,3)
    batch_size = pred.get_shape()[0].value

    idx1 = tf.reshape(tf.range(1, up_ratio),
                      (1, up_ratio - 1)) * nPoints  # (1, 4)
    idx2 = tf.reshape(tf.range(nPoints * up_ratio),
                      (nPoints * up_ratio, 1))  # (4096,1)
    idx = (idx1 + idx2) % (nPoints * up_ratio)
    idx = tf.tile(tf.expand_dims(idx, axis=0), (batch_size, 1, 1))

    grouped_pred = group_point(pred, idx)  # (batch_size, npoint, nsample, 3)
    grouped_pred -= tf.expand_dims(pred, 2)

    ##get the uniform loss
    dists = tf.reduce_sum(grouped_pred**2, axis=-1)
    val = tf.maximum(0.0,
                     tf.reshape(radius,
                                (-1, 1, 1)) * 0.002 - dists) / tf.expand_dims(
                                    tf.expand_dims(radius, axis=-1), axis=-1)
    uniform_loss = tf.reduce_mean(val)

    return uniform_loss, idx
Example #21
def lfnet_module(kernel,
                 scale,
                 interp,
                 fit,
                 xyz,
                 points,
                 normals,
                 axis_x,
                 axis_y,
                 xyz_feature,
                 npoint,
                 radius_list,
                 nsample_list,
                 mlp_list,
                 is_training,
                 bn_decay,
                 scope,
                 mlp=[64, 64],
                 bn=True,
                 use_xyz=False,
                 weight=None,
                 knn=0,
                 d=1,
                 end=False,
                 use_xyz_feature=True,
                 first_layer=False):
    ''' A-CNN module with rings
        Input:
            xyz: (batch_size, ndataset, 3) TF tensor
            points: (batch_size, ndataset, channel) TF tensor
            normals: (batch_size, ndataset, 3) TF tensor
            npoint: int32 -- #points sampled in farthest point sampling
            radius_list: list of float32 -- search radii (inner and outer) defining the ring in each local region
            nsample_list: list of int32 -- how many points in each local region
            mlp: list of list of int32 -- output size for MLP on each point
            use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
        Return:
            new_xyz: (batch_size, npoint, 3) TF tensor
            new_points: (batch_size, npoint, \sum_k{mlp[k][-1]}) TF tensor
    '''
    # data_format = 'NCHW' if use_nchw else 'NHWC'
    with tf.variable_scope(scope) as sc:
        # if npoint == xyz.get_shape().as_list()[1] and knn==0:
        #     raise Exception('wrong input knn and npoint')
        if npoint != xyz.get_shape().as_list()[1]:
            indecies = farthest_point_sample(npoint, xyz)
            new_xyz = gather_point(xyz, indecies)  # (batch_size, npoint, 3)
            new_normals = gather_point(normals,
                                       indecies)  # (batch_size, npoint, 3)
            new_axis_x = gather_point(axis_x, indecies)
            new_axis_y = gather_point(axis_y, indecies)
        elif knn:
            new_xyz = xyz
            new_normals = normals
            new_axis_x = axis_x
            new_axis_y = axis_y
        else:
            indecies = tf.range(npoint)
            indecies = tf.tile(tf.expand_dims(indecies, 0),
                               [xyz.get_shape().as_list()[0], 1])
            new_xyz = xyz
            new_normals = normals
            new_axis_x = axis_x
            new_axis_y = axis_y

        batch_size = xyz.get_shape()[0].value
        new_points_list = []

        for i in range(len(nsample_list)):
            radius = radius_list[i]
            print(radius)
            nsample = nsample_list[i]
            nk = kernel.get_shape().as_list()[0]
            sita = scale
            if knn == 1:
                radius = 0

            _, idx = knn_point(nsample, xyz, new_xyz, d=d[i])

            grouped_xyz = group_point(xyz, idx)
            grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2),
                                   [1, 1, nsample, 1])

            if weight is None:
                _, proj, _, kernel_out, weight, kernel_fit = transform_neighbors(
                    nsample, idx, xyz, new_xyz, new_normals, new_axis_x,
                    new_axis_y, kernel, sita, interp, fit, radius)
                proj = relative_pos_encoding(proj)
                if interp != 2:
                    # weight=tf.nn.softmax(weight,axis=-2)
                    weight = weight / tf.reduce_sum(
                        weight, axis=-2, keep_dims=True)
                weight = tf.expand_dims(weight, 3)
            if points is not None:
                grouped_points = group_point(points, idx)
                if use_xyz:
                    grouped_points = tf.concat([grouped_points, grouped_xyz],
                                               axis=-1)
            else:
                grouped_points = proj
            ########### addition xyz features
            if use_xyz_feature:
                if xyz_feature is None:
                    xyz_feature = proj
                else:
                    xyz_feature = group_point(xyz_feature, idx)
                edge_feature = proj

                edge_feature = tf_util.conv2d(edge_feature,
                                              mlp[0], [1, 1],
                                              padding='VALID',
                                              stride=[1, 1],
                                              bn=bn,
                                              is_training=is_training,
                                              scope='xyz_feature_%d' % (0),
                                              bn_decay=bn_decay)
                edge_feature = tf_util.conv2d(edge_feature,
                                              mlp[0], [1, 1],
                                              padding='VALID',
                                              stride=[1, 1],
                                              bn=bn,
                                              is_training=is_training,
                                              scope='xyz_feature_%d' % (1),
                                              bn_decay=bn_decay)
                output_feature = tf.concat([xyz_feature, edge_feature],
                                           axis=-1)
                if end == False:
                    xyz_feature = tf_util.conv2d(output_feature,
                                                 mlp[-1], [1, 1],
                                                 padding='VALID',
                                                 stride=[1, 1],
                                                 bn=bn,
                                                 is_training=is_training,
                                                 scope='xyz_feature2',
                                                 bn_decay=bn_decay)
                    # we can try sum and mean
                    xyz_feature = tf.reduce_max(xyz_feature,
                                                axis=[2],
                                                keep_dims=True,
                                                name='maxpool')
                    xyz_feature = tf.squeeze(xyz_feature, [2])
            if use_xyz_feature:
                grouped_points = tf.concat([grouped_points, output_feature],
                                           axis=-1)
            # ASFConv (TODO: add a for loop here)
            if first_layer:
                grouped_points = tf_util.conv2d(grouped_points,
                                                mlp_list[i][0], [1, 1],
                                                padding='VALID',
                                                stride=[1, 1],
                                                bn=bn,
                                                is_training=is_training,
                                                scope='conv%d_%d' % (i, 0),
                                                bn_decay=bn_decay)
            # Discrete Conv
            new_points = DiscreteConv(grouped_points, mlp_list, bn, i,
                                      is_training, bn_decay, weight, nk,
                                      kernel_fit)
            new_points_list.append(new_points)
        new_points = tf.concat(new_points_list, axis=-1)

        if first_layer:
            return new_xyz, new_points, new_normals, new_axis_x, new_axis_y, kernel_out, weight, kernel_fit, xyz_feature
        else:
            return new_xyz, new_points, new_normals, new_axis_x, new_axis_y, _, weight, _, xyz_feature
Example #22
    def build_graph(self, *inputs_dict):
        inputs_dict = dict(zip(self.input_names, inputs_dict))

        ####### concat pointclouds
        pcdset = [inputs_dict['anchor']]
        if self.config.num_pos > 0:
            pcdset.append(tf.reshape(inputs_dict['pos'], [-1, self.config.num_points, 3]))
        if self.config.num_neg > 0:
            pcdset.append(tf.reshape(inputs_dict['neg'], [-1, self.config.num_points, 3]))
        if self.config.other_neg:
            pcdset.append(inputs_dict['otherneg'])
        points = tf.concat(pcdset, 0, name='pointclouds')  # query+pos+neg+otherneg, numpts, 3

        if self.input_knn_indices:
            knn_ind_set = [inputs_dict['knn_ind_anchor']]
            if inputs_dict.get('knn_ind_pos') is not None:
                knn_ind_set.append(inputs_dict['knn_ind_pos'])
            if inputs_dict.get('knn_ind_neg') is not None:
                knn_ind_set.append(inputs_dict['knn_ind_neg'])
            knn_inds = tf.concat(knn_ind_set, 0, name='knn_inds')
            self.knn_indices = tf.transpose(knn_inds, perm=[0, 2, 1])  # batch, k, numpts
        else:
            self.knn_indices, distances = knn_bruteforce(tf.transpose(points, perm=[0, 2, 1]), k=self.config.knn_num)

        if self.config.sampled_kpnum > 0:
            sample_nodes_concat = tf.concat([inputs_dict['sample_ind_anchor'], inputs_dict['sample_ind_pos']], 0)
            self.sample_nodes_concat = tf.expand_dims(sample_nodes_concat, 2)
        else:
            self.sample_nodes_concat = None

        freeze_local = self.config.freezebackbone
        freeze_det = self.config.freezedetection
        freeze_global = self.config.freezeglobal

        ####### get local features
        outs = {}
        outs['xyz'] = points
        outs['knn_indices'] = self.knn_indices
        if self.config.input_R:
            outs['R'] = inputs_dict['R']

        newpoints, localdesc = self.compute_local(points, freeze_local)
        localdesc_l2normed = tf.nn.l2_normalize(localdesc, dim=2, epsilon=1e-8, name='feat_l2normed')
        outs['feat'] = localdesc
        outs['local_desc'] = localdesc_l2normed

        saved_tensor_xyz_feat = tf.concat([newpoints, localdesc_l2normed], -1, name='xyz_feat')


        ####### get local attentions
        if self.config.detection:
            detect_att = getattr(backbones, self.detection_block)(localdesc, freeze_det=freeze_det)
            outs['attention'] = detect_att
            saved_tensor_xyz_feat_att = tf.concat([newpoints, localdesc_l2normed, detect_att], -1, name='xyz_feat_att')

        if self.config.sampled_kpnum > 0:
            outs['sample_nodes_concat'] = self.sample_nodes_concat
            localxyzsample, localfeatsample, kp_indices = backbones.subsample(points, localdesc_l2normed,
                                                                                  self.config.sampled_kpnum,
                                                                                  kp_idx=self.sample_nodes_concat)
            outs['feat_sampled'] = localfeatsample
            outs['xyz_sampled'] = localxyzsample
            xyz_feat = tf.concat([localxyzsample, localfeatsample], -1, name='xyz_feat_sampled')
            if self.config.get('detection'):
                att_sampled = tf.squeeze(group_point(detect_att, kp_indices), axis=-1)
                outs['att_sampled'] = att_sampled

        #### get global features
        if self.config.extract_global:
            globaldesc = self.compute_global(outs, freeze_global=freeze_global)
            globaldesc_l2normed = tf.nn.l2_normalize(globaldesc, dim=-1, epsilon=1e-8, name='globaldesc')
            outs['global_desc'] = globaldesc_l2normed

        ### loss
        if self.training:
            return self.compute_loss(outs)
Example #23
def sample_and_group(npoint,
                     radius,
                     nsample,
                     xyz,
                     points,
                     tnet_spec=None,
                     knn=False,
                     use_xyz=True,
                     keypoints=None,
                     orientations=None,
                     normalize_radius=False):
    '''
    Input:
        npoint: int32
        radius: float32
        nsample: int32
        xyz: (batch_size, ndataset, 3) TF tensor
        points: (batch_size, ndataset, channel) TF tensor, if None will just use xyz as points
        tnet_spec: dict (keys: mlp, mlp2, is_training, bn_decay), if None do not apply tnet
        knn: bool, if True use kNN instead of radius search
        use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
        keypoints: None or tensor with shape [None, None, 3], containing the xyz of keypoints.
                   If provided, npoint will be ignored, and iterative furthest sampling will be skipped
    Output:
        new_xyz: (batch_size, npoint, 3) TF tensor, i.e. cluster center (dim=3)
        new_points: (batch_size, npoint, nsample, 3+channel) TF tensor (dim=3+c, first 3 dimensions are normalized XYZ)
        idx: (batch_size, npoint, nsample) TF tensor, indices of local points as in ndataset points
        grouped_xyz: (batch_size, npoint, nsample, 3) TF tensor, normalized point XYZs
            (subtracted by seed point XYZ) in local regions. This is usually the first 3 dimensions of new_points
    '''

    end_points = {}

    if keypoints is not None:
        new_xyz = keypoints
    else:
        new_xyz = gather_point(xyz, farthest_point_sample(
            npoint, xyz))  # (batch_size, npoint, 3)

    if knn:
        _, idx = knn_point(nsample, xyz, new_xyz)
        pts_cnt = nsample  # Hack: assumes the input has at least nsample points per query
    else:
        idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)

    grouped_xyz = group_point(xyz, idx)  # (batch_size, npoint, nsample, 3)
    grouped_xyz = grouped_xyz - tf.tile(tf.expand_dims(
        new_xyz, 2), [1, 1, nsample, 1])  # translation normalization
    if normalize_radius:
        grouped_xyz /= radius

    end_points['grouped_xyz_before'] = grouped_xyz

    # 2D-rotate via orientations if necessary
    if orientations is not None:
        cosval = tf.cos(orientations)
        sinval = tf.sin(orientations)
        one = tf.ones_like(cosval)
        zero = tf.zeros_like(cosval)
        R = tf.stack([(cosval, sinval, zero), (-sinval, cosval, zero),
                      (zero, zero, one)],
                     axis=0)
        R = tf.transpose(R, perm=[2, 3, 0, 1])
        grouped_xyz = tf.matmul(grouped_xyz, R)
        end_points['rotation'] = R

    if tnet_spec is not None:
        grouped_xyz = tnet(grouped_xyz, tnet_spec)
    if points is not None:
        grouped_points = group_point(
            points, idx)  # (batch_size, npoint, nsample, channel)
        if use_xyz:
            new_points = tf.concat(
                [grouped_xyz, grouped_points],
                axis=-1)  # (batch_size, npoint, nsample, 3+channel)
        else:
            new_points = grouped_points
    else:
        new_points = grouped_xyz

    end_points['grouped_xyz'] = grouped_xyz

    return new_xyz, new_points, idx, grouped_xyz, end_points
Example #24
def get_model(point_cloud, is_training,  up_ratio, max_ratio, bradius=1.0,k=30, topk=4,scope='generator', weight_decay=0, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    is_dist = False 
    bn = True
    bottleneck_size = 128

    batch_size = point_cloud.get_shape()[0].value
    num_point_sparse = point_cloud.get_shape()[1].value #n
    num_point_max = num_point_sparse*max_ratio # n*r_max
    num_point_up = num_point_sparse*up_ratio # n*r

    with tf.variable_scope(scope, reuse=tf.AUTO_REUSE) as sc:
        adj_point = tf_util.pairwise_distance(point_cloud) #[b,n,n]
        features = feature_extraction(point_cloud, scope='feature_extraction', is_training=is_training, bn_decay=None) #[b,n,1,d]

        net = tf_util.conv2d(features, bottleneck_size, [1,1], padding='VALID', stride=[1,1], bn_decay=bn_decay,
                    bn=True, is_training=is_training, scope='concat_conv1', weight_decay=weight_decay, is_dist=is_dist) #[b,n,1,d]
 

        # obtain knn information
        knn_idx = tf_util.knn(adj_point, k=topk)  #[b,n,k] as index
        edge_feature = tf_util.get_edge_feature(net, nn_idx=knn_idx, k=topk) # [b,n,k,128*2]

        knn_point = group_point(point_cloud, knn_idx) # [b,n,k,3]
        xyz_tile = tf.tile(tf.expand_dims(point_cloud, axis=2), [1,1,topk,1]) # [b,n,k,3]

        dist_point = knn_point - xyz_tile  # [b,n,k,3]
        dist = tf.norm(dist_point, axis=-1, keepdims=True)  # [b,n,k,1]

        dist_feat = tf.concat([xyz_tile, knn_point, dist_point, dist], axis=-1)  # [b,n,k,d]

        dist_feat = tf_util.conv2d(dist_feat, bottleneck_size, [1,1], padding='VALID', stride=[1,1], bn_decay=bn_decay,
                    bn=True, is_training=is_training, scope='concat_conv2', weight_decay=weight_decay, is_dist=is_dist) # [b,n,k,d]

        # regress score
        feature_append = tf.concat([edge_feature, dist_feat], axis=-1) # [b,n,k,d]

        score_full = tf_util.conv2d(feature_append, 64, [1,1],
                               padding='VALID', stride=[1,1],
                               bn=bn, is_training=is_training, weight_decay=weight_decay,
                               scope='score_conv1', bn_decay=bn_decay, is_dist=is_dist)
        score_full = tf_util.conv2d(score_full, 64, [1,1],
                               padding='VALID', stride=[1,1],
                               bn=bn, is_training=is_training, weight_decay=weight_decay,
                               scope='score_conv2', bn_decay=bn_decay, is_dist=is_dist)

        score_full = tf_util.conv2d(score_full, max_ratio, [1,1],
                               padding='VALID', stride=[1,1],
                               bn=bn, is_training=is_training, weight_decay=weight_decay,
                               scope='score_conv3', bn_decay=bn_decay, is_dist=is_dist, activation_fn=None, bias=False) # [b,n,k,r_max]

        score_full = tf.nn.softmax(score_full, axis=2) # [b,n,k,r_max]
        score_full = tf.transpose(score_full, perm=[0,1,3,2]) # [b,n,r_max,k]
        score = score_full[:,:,0:up_ratio,:] # [b,n,r,k]
 
        # regress linear combination points
        S_point1_tmp = tf.matmul(score, knn_point)  # [b,n,r,3]
        S_point1 = tf.reshape(S_point1_tmp, [batch_size, num_point_up,3])  # [b,n*r,3]

        
        # self attention
        S_feat = tf.matmul(score, feature_append) # [b,n,r, 128*3]
        S_feat = tf.concat([S_point1_tmp, S_feat], axis=-1) # [b,n,r, 128*3+3]
        S_feat = tf.reshape(S_feat, [batch_size, num_point_up, 387]) # [b,n*r, 128*3+3]
        S_feat = tf_util.conv1d(S_feat, 128, 1,
                           padding='VALID', stride=1,
                           bn=False, is_training=is_training, weight_decay=weight_decay,
                           scope='sa_conv0', bn_decay=bn_decay, is_dist=is_dist)
        
        feat_q = tf_util.conv1d(S_feat, 128, 1,
                           padding='VALID', stride=1,
                           bn=False, is_training=is_training, weight_decay=weight_decay,
                           scope='sa_q1', bn_decay=bn_decay, is_dist=is_dist, bias=False) # [b,n*r, 128]
        feat_v = tf_util.conv1d(S_feat, 128, 1,
                           padding='VALID', stride=1,
                           bn=False, is_training=is_training, weight_decay=weight_decay,
                           scope='sa_v1', bn_decay=bn_decay, is_dist=is_dist) # [b,n*r, 128]
        energy = tf.matmul(feat_q, tf.transpose(feat_q, perm=[0,2,1])) #[b,n*r,n*r]
        attention = tf.nn.softmax(energy, axis=1)
        attention = attention / (1e-9 + tf.reduce_sum(attention, axis=-1, keepdims=True)) #[b,n*r,n*r]
        feat_sa = tf.matmul(attention, feat_v)  #[b,n*r,128]
        feat_sa = tf_util.conv1d(feat_sa, 128, 1,
                           padding='VALID', stride=1,
                           bn=True, is_training=is_training, weight_decay=weight_decay,
                           scope='sa_conv1', bn_decay=bn_decay, is_dist=is_dist)
        
        # refinement by self attention
        net = tf_util.conv1d(feat_sa, 64, 1,
                           padding='VALID', stride=1,
                           bn=False, is_training=is_training, weight_decay=weight_decay,
                           scope='sa_conv2', bn_decay=bn_decay, is_dist=is_dist)

        sa_offset = tf_util.conv1d(net, 3, 1,
                           padding='VALID', stride=1,
                           bn=False, is_training=is_training, weight_decay=weight_decay,
                           scope='sa_conv3', bn_decay=bn_decay, is_dist=is_dist, activation_fn=None) # [b,n*r,3]


        S_point2 = S_point1 + sa_offset


    return S_point2, S_point1
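
A hypothetical call of the upsampling generator above (pointclouds_pl and is_training_pl are assumed placeholders; up_ratio must not exceed max_ratio):

dense_pts, coarse_pts = get_model(pointclouds_pl, is_training_pl,
                                  up_ratio=4, max_ratio=16)
# coarse_pts: (B, N*4, 3) linear combinations of kNN points
# dense_pts:  (B, N*4, 3) the same points refined by the self-attention offset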
Example #25
def pointnet_sa_module_msg(xyz,
                           points,
                           npoint,
                           radius_list,
                           nsample_list,
                           mlp_list,
                           is_training,
                           bn_decay,
                           scope,
                           bn=True,
                           use_xyz=True,
                           use_nchw=False):
    ''' PointNet Set Abstraction (SA) module with Multi-Scale Grouping (MSG)
        Input:
            xyz: (batch_size, ndataset, 3) TF tensor
            points: (batch_size, ndataset, channel) TF tensor
            npoint: int32 -- #points sampled in farthest point sampling
            radius: list of float32 -- search radius in local region
            nsample: list of int32 -- how many points in each local region
            mlp: list of list of int32 -- output size for MLP on each point
            use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
            use_nchw: bool, if True, use NCHW data format for conv2d, which is usually faster than NHWC format
        Return:
            new_xyz: (batch_size, npoint, 3) TF tensor
            new_points: (batch_size, npoint, \sum_k{mlp[k][-1]}) TF tensor
    '''
    data_format = 'NCHW' if use_nchw else 'NHWC'
    with tf.variable_scope(scope) as sc:
        new_xyz = gather_point(xyz, farthest_point_sample(npoint, xyz))
        new_points_list = []
        for i in range(len(radius_list)):
            radius = radius_list[i]
            nsample = nsample_list[i]
            idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)
            grouped_xyz = group_point(xyz, idx)
            grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2),
                                   [1, 1, nsample, 1])
            if points is not None:
                grouped_points = group_point(points, idx)
                if use_xyz:
                    grouped_points = tf.concat([grouped_points, grouped_xyz],
                                               axis=-1)
            else:
                grouped_points = grouped_xyz
            if use_nchw:
                grouped_points = tf.transpose(grouped_points, [0, 3, 1, 2])
            for j, num_out_channel in enumerate(mlp_list[i]):
                ####################################
                grouped_points = Ops.xxlu(Ops.conv2d(grouped_points,
                                                     k=(1, 1),
                                                     out_c=num_out_channel,
                                                     str=1,
                                                     pad='VALID',
                                                     name='lll%d_%d' % (i, j)),
                                          label='lrelu')
                #grouped_points = tf_util.conv2d(grouped_points, num_out_channel, [1,1],
                #padding='VALID', stride=[1,1], bn=bn, is_training=is_training,scope='conv%d_%d'%(i,j), bn_decay=bn_decay)

            if use_nchw:
                grouped_points = tf.transpose(grouped_points, [0, 2, 3, 1])
            new_points = tf.reduce_max(grouped_points, axis=[2])
            new_points_list.append(new_points)
        new_points_concat = tf.concat(new_points_list, axis=-1)
        return new_xyz, new_points_concat
Example #26
def get_smooth_loss(pred,
                    normal,
                    nsample=20,
                    radius=0.05,
                    knn=False,
                    selected=True,
                    re_weight=False,
                    grouping=None):
    # pred: (batch_size, npoint,3)
    # normal : (batch_size,npoint,3)
    if selected:
        radius = 1.0 * radius
        nsample = int(1.0 * nsample)
    # first get some neighborhood points
    if knn:
        with tf.device('/gpu:1'):
            _, idx = knn_point(nsample, pred, pred)
        pts_cnt = tf.constant(nsample, shape=(30, 1024))
    else:
        idx, pts_cnt = query_ball_point(radius, nsample, pred, pred)
    tf.summary.histogram('smooth/unque_index', pts_cnt)

    grouped_pred = group_point(pred, idx)  # (batch_size, npoint, nsample, 3)
    grouped_pred -= tf.expand_dims(pred, 2)
    dists = tf.reduce_sum(grouped_pred**2, axis=-1)
    val, idx = tf.nn.top_k(-dists, 5)
    idx = idx[:, :, 1:]
    grouped_pred = group_point(pred, idx)  # (batch_size, npoint, nsample, 3)
    grouped_pred -= tf.expand_dims(pred, 2)

    grouped_pred_normilize = tf.nn.l2_normalize(grouped_pred, dim=-1)
    inner_product = tf.abs(
        tf.reduce_sum(grouped_pred_normilize * tf.expand_dims(normal, axis=2),
                      axis=-1))  # (batch_size, npoint,nsample)
    if re_weight:
        alpha = 5
        inner_product = (tf.exp(alpha * inner_product) - 1) / (
            np.exp(alpha) - 1)  # (batch_size, npoint,nsample)
    if grouping == 'exp_weighted':
        epision = 1e-12
        dists = tf.norm(grouped_pred + epision,
                        axis=-1)  # (batch_size, npoint,nsample)
        dists = tf.maximum(dists, 1e-10)  # (batch_size, npoint,nsample)
        exp_dists = tf.exp(-dists * 20)  # (batch_size, npoint,nsample)
        weights = exp_dists / tf.reduce_sum(
            exp_dists, axis=2, keep_dims=True)  # (batch_size, npoint, nsample)
        tf.summary.histogram('smooth/weighted', weights)
        inner_product = weights * inner_product

    if selected:
        grouped_normal = group_point(normal, idx)
        mask = tf.to_float(
            tf.greater(
                tf.reduce_sum(grouped_normal * tf.expand_dims(normal, axis=2),
                              axis=-1), 0.0))
        tf.summary.histogram('smooth/mask1', tf.count_nonzero(mask, axis=-1))
        smooth_loss = tf.reduce_sum(mask * inner_product) / tf.reduce_sum(mask)
    else:
        smooth_loss = tf.reduce_mean(inner_product)

    return smooth_loss
Example #27
def pool(xyz, points, k, npoint):
    new_xyz = gather_point(xyz, farthest_point_sample(npoint, xyz))
    _, idx = knn_point(k, xyz, new_xyz)
    new_points = tf.reduce_max(group_point(points, idx), axis=2)

    return new_xyz, new_points
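
A usage sketch for pool: keep npoint FPS centroids and max-pool each centroid's k-nearest-neighbour features (shapes illustrative):

xyz2, feats2 = pool(xyz, points, k=8, npoint=256)
# xyz2: (B, 256, 3), feats2: (B, 256, C)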
Example #28
def res_gcn_up(xyz,
               points,
               k,
               n_cout,
               n_blocks,
               is_training,
               scope,
               bn_decay=None,
               use_bn=False,
               use_ibn=False,
               indices=None,
               up_ratio=2):
    with tf.variable_scope(scope):
        for idx in range(n_blocks):
            with tf.variable_scope('block_{}'.format(idx)):
                shortcut = points

                # Center Features
                points = batch_norm(points,
                                    is_training,
                                    'bn_center',
                                    bn_decay=bn_decay,
                                    use_bn=use_bn,
                                    use_ibn=use_ibn)
                points = tf.nn.relu(points)
                # Neighbor Features
                if idx == 0 and indices is None:
                    _, grouped_points, indices = group(xyz, points, k)
                else:
                    grouped_points = group_point(points, indices)
                # Center Conv
                center_points = tf.expand_dims(points, axis=2)
                points = conv2d(center_points, n_cout, name='conv_center')
                # Neighbor Conv
                grouped_points_nn = conv2d(grouped_points,
                                           n_cout,
                                           name='conv_neighbor')
                # CNN
                points = tf.reduce_mean(tf.concat([points, grouped_points_nn],
                                                  axis=2),
                                        axis=2) + shortcut

                if idx == n_blocks - 1:
                    # Center Conv
                    points_xyz = conv2d(center_points,
                                        3 * up_ratio,
                                        name='conv_center_xyz')
                    # Neighbor Conv
                    grouped_points_xyz = conv2d(grouped_points,
                                                3 * up_ratio,
                                                name='conv_neighbor_xyz')
                    # CNN
                    new_xyz = tf.reduce_mean(tf.concat(
                        [points_xyz, grouped_points_xyz], axis=2),
                                             axis=2)
                    new_xyz = tf.reshape(
                        new_xyz,
                        [-1, new_xyz.get_shape()[1].value, up_ratio, 3])
                    new_xyz = new_xyz + tf.expand_dims(xyz, axis=2)
                    new_xyz = tf.reshape(
                        new_xyz,
                        [-1, new_xyz.get_shape()[1].value * up_ratio, 3])

                    return new_xyz, points
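A hedged call sketch for res_gcn_up; the shapes and hyper-parameters here are illustrative assumptions (note that n_cout must match the input feature width, since each block adds a residual shortcut).

xyz = tf.placeholder(tf.float32, shape=(8, 1024, 3))       # input coordinates
points = tf.placeholder(tf.float32, shape=(8, 1024, 128))   # per-point features
is_training = tf.placeholder(tf.bool, shape=())
# 2x upsampling: returns (8, 2048, 3) coordinates plus updated per-point features
new_xyz, new_points = res_gcn_up(xyz, points, k=8, n_cout=128, n_blocks=3,
                                 is_training=is_training, scope='res_gcn_up',
                                 up_ratio=2)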
示例#29
def pointnet_sa_module(xyz,
                       points,
                       xyz_feature,
                       npoint,
                       radius,
                       nsample,
                       mlp,
                       mlp2,
                       mlp3,
                       group_all,
                       is_training,
                       bn_decay,
                       scope,
                       bn=True,
                       pooling='max',
                       tnet_spec=None,
                       knn=False,
                       use_xyz=False,
                       end=False,
                       use_xyz_feature=False):
    ''' PointNet Set Abstraction (SA) Module with optional geometric (XYZ) edge features
        Input:
            xyz: (batch_size, ndataset, 3) TF tensor
            points: (batch_size, ndataset, channel) TF tensor
            xyz_feature: (batch_size, ndataset, channel2) TF tensor -- auxiliary geometric feature, only used when use_xyz_feature is True
            npoint: int32 -- #points sampled in farthest point sampling
            radius: float32 -- search radius in local region
            nsample: int32 -- how many points in each local region
            mlp: list of int32 -- output sizes for the per-point MLP
            mlp2: list of int32 -- output sizes for the per-region MLP after pooling
            mlp3: list of int32 -- output sizes for the MLP applied to the geometric edge features
            group_all: bool -- group all points into one PC if set true, OVERRIDES
                npoint, radius and nsample settings
            use_xyz: bool -- if True concat XYZ with local point features, otherwise just use point features
            use_xyz_feature: bool -- if True compute and concatenate the additional XYZ edge features
            end: bool -- if True, skip updating xyz_feature for a following module
        Return:
            new_xyz: (batch_size, npoint, 3) TF tensor
            new_points: (batch_size, npoint, mlp[-1] or mlp2[-1]) TF tensor
            idx: (batch_size, npoint, nsample) int32 -- indices for local regions
            xyz_feature: updated geometric feature TF tensor (passed through unchanged when use_xyz_feature is False)
    '''
    with tf.variable_scope(scope) as sc:
        batch_size = xyz.get_shape()[0].value
        if group_all:
            nsample = xyz.get_shape()[1].value
            new_xyz, new_points, idx, grouped_xyz = sample_and_group_all(
                xyz, points, use_xyz)
        else:
            # without group_all we still need the sampled/grouped tensors,
            # otherwise new_xyz, new_points, idx and grouped_xyz are undefined below
            new_xyz, new_points, idx, grouped_xyz = sample_and_group(
                npoint, radius, nsample, xyz, points, knn, use_xyz)
        idx = tf.cast(idx, tf.int32)
        ########### additional xyz features
        if use_xyz_feature:
            xyz_feature = group_point(xyz_feature, idx)

            e, grouped_xyz = PCA_decompose(grouped_xyz)
            e = tf.tile(tf.expand_dims(e, 2), [1, 1, nsample, 1])
            edge_feature = tf.concat(
                [relative_pos_encoding(tf.abs(grouped_xyz)), e], axis=-1)

            edge_feature = tf_util.conv2d(edge_feature,
                                          mlp3[0], [1, 1],
                                          padding='VALID',
                                          stride=[1, 1],
                                          bn=bn,
                                          is_training=is_training,
                                          scope='xyz_feature_%d' % (0),
                                          bn_decay=bn_decay)

            edge_feature = tf_util.conv2d(edge_feature,
                                          mlp3[1], [1, 1],
                                          padding='VALID',
                                          stride=[1, 1],
                                          bn=bn,
                                          is_training=is_training,
                                          scope='xyz_feature_%d' % (1),
                                          bn_decay=bn_decay)

            output_feature = tf.concat([xyz_feature, edge_feature], axis=-1)
            if end == False:
                xyz_feature = tf_util.conv2d(output_feature,
                                             mlp3[-1], [1, 1],
                                             padding='VALID',
                                             stride=[1, 1],
                                             bn=bn,
                                             is_training=is_training,
                                             scope='xyz_feature2',
                                             bn_decay=bn_decay)
                # max-pool over the neighborhood; sum or mean pooling could be tried here instead
                xyz_feature = tf.reduce_max(xyz_feature,
                                            axis=[2],
                                            keep_dims=True,
                                            name='maxpool')
                xyz_feature = tf.squeeze(xyz_feature, [2])

            new_points = tf.concat([new_points, output_feature], axis=-1)

        for i, num_out_channel in enumerate(mlp):
            new_points = tf_util.conv2d(new_points,
                                        num_out_channel, [1, 1],
                                        padding='SAME',
                                        stride=[1, 1],
                                        bn=bn,
                                        is_training=is_training,
                                        scope='conv%d' % (i),
                                        bn_decay=bn_decay)

        if pooling == 'avg':
            new_points = tf_util.avg_pool2d(new_points, [1, nsample],
                                            stride=[1, 1],
                                            padding='VALID',
                                            scope='avgpool1')
        elif pooling == 'weighted_avg':
            with tf.variable_scope('weighted_avg1'):
                dists = tf.norm(grouped_xyz, axis=-1, ord=2, keep_dims=True)
                exp_dists = tf.exp(-dists * 5)
                weights = exp_dists / tf.reduce_sum(
                    exp_dists, axis=2,
                    keep_dims=True)  # (batch_size, npoint, nsample, 1)
                new_points *= weights  # (batch_size, npoint, nsample, mlp[-1])
                new_points = tf.reduce_sum(new_points, axis=2, keep_dims=True)
        elif pooling == 'max':
            new_points = tf.reduce_max(new_points, axis=[2], keep_dims=True)
        elif pooling == 'min':
            new_points = tf_util.max_pool2d(-1 * new_points, [1, nsample],
                                            stride=[1, 1],
                                            padding='VALID',
                                            scope='minpool1')
        elif pooling == 'max_and_avg':
            max_points = tf_util.max_pool2d(new_points, [1, nsample],
                                            stride=[1, 1],
                                            padding='VALID',
                                            scope='maxpool1')
            avg_points = tf_util.avg_pool2d(new_points, [1, nsample],
                                            stride=[1, 1],
                                            padding='VALID',
                                            scope='avgpool1')
            new_points = tf.concat([avg_points, max_points], axis=-1)

        if mlp2 is None: mlp2 = []
        for i, num_out_channel in enumerate(mlp2):
            new_points = tf_util.conv2d(new_points,
                                        num_out_channel, [1, 1],
                                        padding='VALID',
                                        stride=[1, 1],
                                        bn=bn,
                                        is_training=is_training,
                                        scope='conv_post_%d' % (i),
                                        bn_decay=bn_decay)
        new_points = tf.squeeze(new_points,
                                [2])  # (batch_size, npoints, mlp2[-1])
        return new_xyz, new_points, idx, xyz_feature
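A hedged usage sketch for the SA module above; the placeholder shapes, channel widths, and MLP sizes are assumptions chosen for illustration, and the geometric edge-feature path is left disabled so that PCA_decompose and relative_pos_encoding are not required.

xyz = tf.placeholder(tf.float32, shape=(8, 1024, 3))
points = tf.placeholder(tf.float32, shape=(8, 1024, 64))
xyz_feature = tf.placeholder(tf.float32, shape=(8, 1024, 32))
is_training = tf.placeholder(tf.bool, shape=())
new_xyz, new_points, idx, xyz_feature = pointnet_sa_module(
    xyz, points, xyz_feature,
    npoint=512, radius=0.2, nsample=32,
    mlp=[64, 64, 128], mlp2=None, mlp3=[32, 32, 64],
    group_all=False, is_training=is_training, bn_decay=None,
    scope='sa_layer1', use_xyz_feature=False)
# new_xyz: (8, 512, 3), new_points: (8, 512, 128)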