Example 1
def feature_decoding_layer(xyz1,
                           xyz2,
                           points1,
                           points2,
                           radius,
                           sigma,
                           K,
                           mlp,
                           is_training,
                           bn_decay,
                           weight_decay,
                           scope,
                           bn=True,
                           use_xyz=True,
                           boundary_label=None):
    ''' Input:
            xyz1: (batch_size, ndataset1, 3) TF tensor
            xyz2: (batch_size, ndataset2, 3) TF tensor, sparser than xyz1
            points1: (batch_size, ndataset1, nchannel1) TF tensor
            points2: (batch_size, ndataset2, nchannel2) TF tensor
            sigma: float32 -- KDE bandwidth
            K: int32 -- how many points in each local region
            mlp: list of int32 -- output size for MLP on each point
        Return:
            new_points: (batch_size, ndataset1, mlp[-1]) TF tensor
    '''
    with tf.variable_scope(scope) as sc:
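        # Upsample: for each point in xyz1, find its 3 nearest neighbors in the
        # sparser xyz2 and blend their features with inverse-distance weights
        # w_i = (1/d_i) / sum_j (1/d_j).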
        dist, idx = three_nn(xyz1, xyz2)
        dist = tf.maximum(dist, 1e-10)
        norm = tf.reduce_sum((1.0 / dist), axis=2, keepdims=True)
        norm = tf.tile(norm, [1, 1, 3])
        weight = (1.0 / dist) / norm
        interpolated_points = three_interpolate(points2, idx, weight)

        # Setup for deconvolution: mask the interpolated features with the
        # boundary label (0 at boundary points, 1 elsewhere).
        if boundary_label is not None:
            tmp_boundary_label = tf.tile(tf.expand_dims(boundary_label, -1),
                                         [1, 1, interpolated_points.shape[2]])
            interpolated_points = interpolated_points * tmp_boundary_label
        grouped_xyz, grouped_feature, idx, grouped_boundary_label = pointconv_util.grouping(
            interpolated_points,
            K,
            xyz1,
            xyz1,
            use_xyz=use_xyz,
            boundary_label=boundary_label)

        weight = weight_net_hidden(grouped_xyz, [32],
                                   scope='decode_weight_net',
                                   is_training=is_training,
                                   bn_decay=bn_decay,
                                   weight_decay=weight_decay)

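        # PointConv aggregation: features (B, N, K, C) are transposed to
        # (B, N, C, K) and matmul'd with the learned weights (B, N, K, 32),
        # giving (B, N, C, 32) before the [1, C] convolution below.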
        new_points = tf.transpose(grouped_feature, [0, 1, 3, 2])
        new_points = tf.matmul(new_points, weight)
        new_points = tf_util.conv2d(new_points,
                                    mlp[0],
                                    [1, new_points.get_shape()[2].value],
                                    padding='VALID',
                                    stride=[1, 1],
                                    bn=bn,
                                    is_training=is_training,
                                    scope='decode_after_conv',
                                    bn_decay=bn_decay,
                                    weight_decay=weight_decay)

        if points1 is not None:
            new_points1 = tf.concat(axis=-1,
                                    values=[
                                        new_points,
                                        tf.expand_dims(points1, axis=2)
                                    ])  # B,ndataset1,nchannel1+nchannel2
        else:
            new_points1 = new_points

        for i, num_out_channel in enumerate(mlp):
            if i != 0:
                new_points1 = tf_util.conv2d(new_points1,
                                             num_out_channel, [1, 1],
                                             padding='VALID',
                                             stride=[1, 1],
                                             bn=bn,
                                             is_training=is_training,
                                             scope='conv_%d' % (i),
                                             bn_decay=bn_decay,
                                             weight_decay=weight_decay)
        new_points1 = tf.squeeze(new_points1, [2])  # B,ndataset1,mlp[-1]
        return new_points1
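
A minimal call sketch for the layer above, assuming l1_xyz/l1_points and
l2_xyz/l2_points are the (dense, sparse) outputs of two earlier encoding
layers and is_training_pl/bn_decay are the usual training placeholders
(all names and values hypothetical):

# Upsample features from the sparse level-2 set back onto the level-1 points.
l1_points = feature_decoding_layer(l1_xyz, l2_xyz, l1_points, l2_points,
                                   radius=0.2, sigma=0.1, K=16,
                                   mlp=[128, 128],
                                   is_training=is_training_pl,
                                   bn_decay=bn_decay,
                                   weight_decay=1e-4,
                                   scope='fa_layer1')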
Example 2
def feature_encoding_layer_extra(xyz,
                                 sample_xyz,
                                 sample_boundary,
                                 feature,
                                 npoint,
                                 radius,
                                 sigma,
                                 K,
                                 mlp,
                                 local_num_out_channel,
                                 is_training,
                                 bn_decay,
                                 weight_decay,
                                 scope,
                                 bn=True,
                                 use_xyz=True,
                                 boundary_label=None):
    ''' Input:
            xyz: (batch_size, ndataset, 3) TF tensor
            sample_xyz: (batch_size, npoint, 3) TF tensor -- precomputed sampled coordinates
            sample_boundary: boundary labels of the sampled points (used when boundary_label is given)
            feature: (batch_size, ndataset, channel) TF tensor
            npoint: int32 -- #points sampled in farthest point sampling
            sigma: float32 -- KDE bandwidth
            K: int32 -- how many points in each local region
            mlp: list of int32 -- output size for MLP on each point
            local_num_out_channel: int32 or None -- channel width of the extra local geometric feature branch
            use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
        Return:
            new_xyz: (batch_size, npoint, 3) TF tensor
            new_points: (batch_size, npoint, mlp[-1]) TF tensor
            sub_boundary_label: boundary labels of the sampled points, or None
    '''
    batch_size = xyz.shape[0]

    with tf.variable_scope(scope) as sc:
        num_points = xyz.get_shape()[1]
        if num_points == npoint:
            new_xyz = xyz
            sub_boundary_label = boundary_label
        else:
            # Sampling is precomputed outside the layer; pointconv_util.sampling
            # / sampling_with_boundary_label would be the in-layer alternative.
            new_xyz = sample_xyz
            sub_boundary_label = None if boundary_label is None else sample_boundary

        # Mask features with the boundary label (0 at boundary points).
        if boundary_label is not None:
            tmp_boundary_label = tf.tile(tf.expand_dims(boundary_label, -1),
                                         [1, 1, feature.shape[2]])
            feature = feature * tmp_boundary_label

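        # Optional geometric branch: group each point's 4 nearest neighbors
        # (slot 0 is assumed to be the point itself), keep the 3 remaining
        # neighbor coordinates, and encode them into extra per-point features.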
        if local_num_out_channel is not None:
            xyz_4n, feature_4n, idx_4n, _ = pointconv_util.grouping(
                feature, 4, xyz, xyz)
            xyz_3n = xyz_4n[:, :, 1:4, :]
            local_feature = tf_util.local_feature(xyz_3n,
                                                  local_num_out_channel, bn,
                                                  is_training, 0, bn_decay,
                                                  weight_decay)
            feature = tf.concat([feature, local_feature], axis=2)
        grouped_xyz, grouped_feature, idx, grouped_boundary_label = pointconv_util.grouping(
            feature, K, xyz, new_xyz, boundary_label=boundary_label)

        for i, num_out_channel in enumerate(mlp):
            if i != len(mlp) - 1:
                grouped_feature = tf_util.conv2d(grouped_feature,
                                                 num_out_channel, [1, 1],
                                                 padding='VALID',
                                                 stride=[1, 1],
                                                 bn=bn,
                                                 is_training=is_training,
                                                 scope='conv%d' % (i),
                                                 bn_decay=bn_decay,
                                                 weight_decay=weight_decay)

        if grouped_boundary_label is not None:
            tmp_grouped_boundary_label = tf.tile(
                tf.expand_dims(grouped_boundary_label, [-1]),
                [1, 1, 1, grouped_feature.shape[3]])
            grouped_feature = grouped_feature * tmp_grouped_boundary_label

        weight = weight_net_hidden(grouped_xyz, [32],
                                   scope='weight_net',
                                   is_training=is_training,
                                   bn_decay=bn_decay,
                                   weight_decay=weight_decay)

        new_points = tf.transpose(grouped_feature, [0, 1, 3, 2])
        new_points = tf.matmul(new_points, weight)
        new_points = tf_util.conv2d(new_points,
                                    mlp[-1],
                                    [1, new_points.get_shape()[2].value],
                                    padding='VALID',
                                    stride=[1, 1],
                                    bn=bn,
                                    is_training=is_training,
                                    scope='after_conv',
                                    bn_decay=bn_decay,
                                    weight_decay=weight_decay)

        new_points = tf.squeeze(new_points, [2])  # (batch_size, npoint, mlp[-1])

        return new_xyz, new_points, sub_boundary_label
Example 3
def feature_encoding_layer_msg(xyz,
                               feature,
                               npoint,
                               radius,
                               sigma,
                               K,
                               mlp,
                               is_training,
                               bn_decay,
                               weight_decay,
                               scope,
                               bn=True,
                               use_xyz=True):
    ''' Input:
            xyz: (batch_size, ndataset, 3) TF tensor
            feature: (batch_size, ndataset, channel) TF tensor
            npoint: int32 -- #points sampled in farthest point sampling
            radius: list of float32 -- search radius of each local region, per scale
            sigma: list of float32 -- KDE bandwidth, per scale
            K: list of int32 -- how many points in each local region, per scale
            mlp: list of int32 -- output size for MLP on each point
            use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
        Return:
            new_xyz: (batch_size, npoint, 3) TF tensor
            new_points: (batch_size, npoint, len(radius) * mlp[-1]) TF tensor
    '''
    radius_list = radius
    K_list = K
    sigma_list = sigma
    num_points = xyz.get_shape()[1]
    if num_points == npoint:
        new_xyz = xyz
    else:
        new_xyz = pointconv_util.sampling(npoint, xyz)
    new_points_list = []

    for i in range(len(radius_list)):
        with tf.variable_scope("%s_%d" % (scope, i)) as sc:
            radius = radius_list[i]
            K = K_list[i]
            sigma = sigma_list[i]

            grouped_xyz, grouped_feature, idx = pointconv_util.grouping(
                feature, K, xyz, new_xyz)

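            # Density compensation: estimate local density with a ball-kernel
            # KDE, then re-weight each grouped neighbor by its inverse density,
            # normalized by the per-group maximum, so sparse regions are not
            # under-counted.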
            density = pointconv_util.kernel_density_estimation_ball(
                xyz, radius, sigma)
            inverse_density = tf.div(1.0, density)
            grouped_density = tf.gather_nd(
                inverse_density, idx)  # (batch_size, npoint, nsample, 1)
            #grouped_density = tf_grouping.group_point(inverse_density, idx)
            inverse_max_density = tf.reduce_max(grouped_density,
                                                axis=2,
                                                keepdims=True)
            density_scale = tf.div(grouped_density, inverse_max_density)

            #density_scale = tf_grouping.group_point(density, idx)

            for j, num_out_channel in enumerate(mlp):
                if j != len(mlp) - 1:
                    grouped_feature = tf_util.conv2d(grouped_feature,
                                                     num_out_channel, [1, 1],
                                                     padding='VALID',
                                                     stride=[1, 1],
                                                     bn=bn,
                                                     is_training=is_training,
                                                     scope='conv%d' % j,
                                                     bn_decay=bn_decay,
                                                     weight_decay=weight_decay)

            weight = weight_net_hidden(grouped_xyz, [32],
                                       scope='weight_net',
                                       is_training=is_training,
                                       bn_decay=bn_decay,
                                       weight_decay=weight_decay)

            density_scale = nonlinear_transform(density_scale, [16, 1],
                                                scope='density_net',
                                                is_training=is_training,
                                                bn_decay=bn_decay,
                                                weight_decay=weight_decay)

            new_points = tf.multiply(grouped_feature, density_scale)

            new_points = tf.transpose(new_points, [0, 1, 3, 2])

            new_points = tf.matmul(new_points, weight)

            new_points = tf_util.conv2d(new_points,
                                        mlp[-1],
                                        [1, new_points.get_shape()[2].value],
                                        padding='VALID',
                                        stride=[1, 1],
                                        bn=bn,
                                        is_training=is_training,
                                        scope='after_conv',
                                        bn_decay=bn_decay,
                                        weight_decay=weight_decay)

            new_points = tf.squeeze(new_points, [2])  # (batch_size, npoint, mlp[-1])

            new_points_list.append(new_points)
    new_points = tf.concat(new_points_list, axis=-1)

    return new_xyz, new_points
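
Since radius, sigma, and K are per-scale lists in this MSG variant, a call
looks like the following (values and tensor names are hypothetical):

# Two-scale grouping: radii 0.1/0.2, 16/32 neighbors per region.
new_xyz, new_points = feature_encoding_layer_msg(l0_xyz, l0_points,
                                                 npoint=1024,
                                                 radius=[0.1, 0.2],
                                                 sigma=[0.05, 0.1],
                                                 K=[16, 32],
                                                 mlp=[32, 32, 64],
                                                 is_training=is_training_pl,
                                                 bn_decay=bn_decay,
                                                 weight_decay=1e-4,
                                                 scope='msg_layer1')
# new_points has len(radius) * mlp[-1] = 2 * 64 = 128 channels.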
Example 4
def feature_decoding_layer_depthwise(xyz1,
                                     xyz2,
                                     points1,
                                     points2,
                                     radius,
                                     sigma,
                                     K,
                                     mlp,
                                     is_training,
                                     bn_decay,
                                     weight_decay,
                                     scope,
                                     bn=True,
                                     use_xyz=True):
    ''' Depthwise version of pointconv.
        Input:
            xyz1: (batch_size, ndataset1, 3) TF tensor
            xyz2: (batch_size, ndataset2, 3) TF tensor, sparser than xyz1
            points1: (batch_size, ndataset1, nchannel1) TF tensor
            points2: (batch_size, ndataset2, nchannel2) TF tensor
            sigma: float32 -- KDE bandwidth
            K: int32 -- how many points in each local region
            mlp: list of int32 -- output size for MLP on each point
        Return:
            new_points: (batch_size, ndataset1, mlp[-1]) TF tensor
    '''
    with tf.variable_scope(scope) as sc:
        dist, idx = three_nn(xyz1, xyz2)
        dist = tf.maximum(dist, 1e-10)
        norm = tf.reduce_sum((1.0 / dist), axis=2, keepdims=True)
        norm = tf.tile(norm, [1, 1, 3])
        weight = (1.0 / dist) / norm
        interpolated_points = three_interpolate(points2, idx, weight)

        #setup for deConv
        grouped_xyz, grouped_feature, idx = pointconv_util.grouping(
            interpolated_points, K, xyz1, xyz1, use_xyz=use_xyz)

        density = pointconv_util.kernel_density_estimation_ball(
            xyz1, radius, sigma)
        inverse_density = tf.div(1.0, density)
        grouped_density = tf.gather_nd(inverse_density,
                                       idx)  # (batch_size, npoint, nsample, 1)
        #grouped_density = tf_grouping.group_point(inverse_density, idx)
        inverse_max_density = tf.reduce_max(grouped_density,
                                            axis=2,
                                            keepdims=True)
        density_scale = tf.div(grouped_density, inverse_max_density)

        #density_scale = tf_grouping.group_point(density, idx)

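        # Depthwise variant: weight_net's last width equals the feature channel
        # count, so each channel gets its own per-neighbor weight; aggregation
        # is an elementwise multiply plus a learned sum-pool over the K
        # neighbors, instead of the dense matmul used in the other layers.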
        weight = weight_net(grouped_xyz,
                            [32, grouped_feature.get_shape()[3].value],
                            scope='decode_weight_net',
                            is_training=is_training,
                            bn_decay=bn_decay,
                            weight_decay=weight_decay)

        density_scale = nonlinear_transform(density_scale, [16, 1],
                                            scope='decode_density_net',
                                            is_training=is_training,
                                            bn_decay=bn_decay,
                                            weight_decay=weight_decay)

        # Scale features by normalized inverse density, then by the learned
        # depthwise weights.
        new_points = tf.multiply(grouped_feature, density_scale)
        new_points = tf.multiply(new_points, weight)

        new_points = tf_util.reduce_sum2d_conv(new_points,
                                               axis=2,
                                               scope='fp_sumpool',
                                               bn=True,
                                               bn_decay=bn_decay,
                                               is_training=is_training,
                                               keep_dims=False)

        if points1 is not None:
            new_points1 = tf.concat(
                axis=-1, values=[new_points,
                                 points1])  # B,ndataset1,nchannel1+nchannel2
        else:
            new_points1 = new_points
        new_points1 = tf.expand_dims(new_points1, 2)
        for i, num_out_channel in enumerate(mlp):
            new_points1 = tf_util.conv2d(new_points1,
                                         num_out_channel, [1, 1],
                                         padding='VALID',
                                         stride=[1, 1],
                                         bn=bn,
                                         is_training=is_training,
                                         scope='conv_%d' % (i),
                                         bn_decay=bn_decay,
                                         weight_decay=weight_decay)
        new_points1 = tf.squeeze(new_points1, [2])  # B,ndataset1,mlp[-1]
        return new_points1
Example 5
def get_boundary_model_loss(labels_pl,
                            point_cloud,
                            is_training,
                            num_class,
                            sigma,
                            bn_decay=None,
                            weight_decay=None):
    # Generate the target boundary label: 0 at boundary points, 1 elsewhere.
    num_neighbor = 16
    ratio = 0.6
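    # Example: with num_neighbor = 16 and ratio = 0.6, a point needs at least
    # 16 * 0.6 = 9.6, i.e. 10 of its 16 neighbors, with the same label to be
    # counted as interior (label 1); otherwise it is a boundary point (label 0).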
    g_xyz, g_labels, g_idx, _ = pointconv_util.grouping(
        tf.cast(tf.expand_dims(labels_pl, -1), tf.float32),
        num_neighbor,
        point_cloud[:, :, :3],
        point_cloud[:, :, :3],
        use_xyz=False)
    g_labels = tf.squeeze(g_labels, [-1])
    self_labels = tf.cast(
        tf.tile(tf.expand_dims(labels_pl, -1), [1, 1, num_neighbor]),
        tf.float32)
    # For each point, count how many of its neighbors share its label.
    same_label_num = tf.reduce_sum(tf.cast(tf.equal(g_labels, self_labels),
                                           tf.float32),
                                   axis=2)

    boundary_points = tf.cast(
        tf.greater_equal(same_label_num, num_neighbor * ratio), tf.float32)

    target_boundary_label = tf.stop_gradient(boundary_points)

    # ========================================

    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    l0_xyz = point_cloud[:, :, :3]
    l0_points = point_cloud[:, :, 3:6]

    #difference extraction
    _, tmp_grouped_feature, _, _ = pointconv_util.grouping(l0_points,
                                                           8,
                                                           l0_xyz,
                                                           l0_xyz,
                                                           boundary_label=None,
                                                           use_xyz=False)
    # Use the per-point feature variance over the 8-NN neighborhood as the
    # low-level input signal.
    mean, var = tf.nn.moments(tmp_grouped_feature, axes=[2])
    # tmp_average_feature = tf.reduce_mean(tmp_grouped_feature, axis=2)
    l0_points = var

    #Feature encoding layer
    # l1_xyz, l1_points, _ = feature_encoding_layer(l0_xyz, l0_points, npoint=2048, radius = 0.1, sigma = sigma, K=8, mlp=[32,32,64], is_training=is_training, bn_decay=bn_decay, weight_decay = weight_decay, scope='boundary_layer1')

    #Feature decoding layer
    # l0_points = feature_decoding_layer(l0_xyz, l1_xyz, l0_points, l1_points, 0.1, sigma, 8, [128,128,128], is_training, bn_decay, weight_decay, scope='boundary_fa_layer2')

    # FC layers
    net = tf_util.conv1d(l0_points,
                         32,
                         1,
                         padding='VALID',
                         bn=True,
                         is_training=is_training,
                         scope='boundary_fc1',
                         bn_decay=bn_decay,
                         weight_decay=weight_decay)
    net = tf_util.conv1d(net,
                         64,
                         1,
                         padding='VALID',
                         bn=True,
                         is_training=is_training,
                         scope='boundary_fc2',
                         bn_decay=bn_decay,
                         weight_decay=weight_decay)
    net = tf_util.conv1d(net,
                         64,
                         1,
                         padding='VALID',
                         bn=True,
                         is_training=is_training,
                         scope='boundary_fc3',
                         bn_decay=bn_decay,
                         weight_decay=weight_decay)
    net = tf_util.conv1d(net,
                         128,
                         1,
                         padding='VALID',
                         bn=True,
                         is_training=is_training,
                         scope='boundary_fc4',
                         bn_decay=bn_decay,
                         weight_decay=weight_decay)
    end_points['feats'] = net
    net = tf_util.dropout(net,
                          keep_prob=0.5,
                          is_training=is_training,
                          scope='dp1')
    net = tf_util.conv1d(net,
                         1,
                         1,
                         padding='VALID',
                         activation_fn=None,
                         weight_decay=weight_decay,
                         scope='boundary_fc5')
    net = tf.squeeze(net, [2])

    # Boundary points (label 0) get weight 2; interior points get weight 1.
    boundary_weight = 2 - target_boundary_label
    boundary_loss = tf.losses.sigmoid_cross_entropy(
        tf.cast(target_boundary_label, tf.int32),
        net,
        weights=boundary_weight)

    return net, boundary_loss
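
The returned net holds per-point boundary logits. One plausible way to turn
them into the boundary_label consumed by the layers above (the 0.5 threshold
and the stop_gradient are assumptions, not taken from this code):

boundary_prob = tf.sigmoid(net)
# 1 where the model predicts "interior", 0 where it predicts "boundary".
boundary_label = tf.stop_gradient(
    tf.cast(tf.greater(boundary_prob, 0.5), tf.float32))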
Example 6
def feature_encoding_layer_extra(xyz,
                                 sample_xyz,
                                 feature,
                                 npoint,
                                 radius,
                                 sigma,
                                 K,
                                 mlp,
                                 akc_channel,
                                 is_training,
                                 bn_decay,
                                 weight_decay,
                                 scope,
                                 bn=True,
                                 use_xyz=True):
    ''' Input:
            xyz: (batch_size, ndataset, 3) TF tensor
            sample_xyz: (batch_size, npoint, 3) TF tensor -- precomputed sampled coordinates
            feature: (batch_size, ndataset, channel) TF tensor
            npoint: int32 -- #points sampled in farthest point sampling
            sigma: float32 -- KDE bandwidth
            K: int32 -- how many points in each local region
            mlp: list of int32 -- output size for MLP on each point
            akc_channel: int32 or None -- channel width of the extra local geometric feature branch
            use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
        Return:
            new_xyz: (batch_size, npoint, 3) TF tensor
            new_points: (batch_size, npoint, mlp[-1]) TF tensor
    '''
    with tf.variable_scope(scope) as sc:
        num_points = xyz.get_shape()[1]
        if num_points == npoint:
            new_xyz = xyz
        else:
            new_xyz = sample_xyz

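        # AKC branch: the same 3-nearest-neighbor geometric encoding as the
        # local_feature branch in the earlier example, routed through
        # tf_util.akc.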
        if akc_channel is not None:
            xyz_4n, feature_4n, idx_4n = pointconv_util.grouping(
                feature, 4, xyz, xyz)
            xyz_3n = xyz_4n[:, :, 1:4, :]
            akc_feature = tf_util.akc(xyz_3n, akc_channel, bn, is_training, 0,
                                      bn_decay, weight_decay)
            feature = tf.concat([feature, akc_feature], axis=2)
        grouped_xyz, grouped_feature, idx = pointconv_util.grouping(
            feature, K, xyz, new_xyz)

        density = pointconv_util.kernel_density_estimation_ball(
            xyz, radius, sigma)
        inverse_density = tf.div(1.0, density)
        grouped_density = tf.gather_nd(inverse_density,
                                       idx)  # (batch_size, npoint, nsample, 1)
        #grouped_density = tf_grouping.group_point(inverse_density, idx)
        inverse_max_density = tf.reduce_max(grouped_density,
                                            axis=2,
                                            keepdims=True)
        density_scale = tf.div(grouped_density, inverse_max_density)

        #density_scale = tf_grouping.group_point(density, idx)

        for i, num_out_channel in enumerate(mlp):
            if i != len(mlp) - 1:
                grouped_feature = tf_util.conv2d(grouped_feature,
                                                 num_out_channel, [1, 1],
                                                 padding='VALID',
                                                 stride=[1, 1],
                                                 bn=bn,
                                                 is_training=is_training,
                                                 scope='conv%d' % (i),
                                                 bn_decay=bn_decay,
                                                 weight_decay=weight_decay)

        weight = weight_net_hidden(grouped_xyz, [32],
                                   scope='weight_net',
                                   is_training=is_training,
                                   bn_decay=bn_decay,
                                   weight_decay=weight_decay)

        density_scale = nonlinear_transform(density_scale, [16, 1],
                                            scope='density_net',
                                            is_training=is_training,
                                            bn_decay=bn_decay,
                                            weight_decay=weight_decay)

        new_points = tf.multiply(grouped_feature, density_scale)

        new_points = tf.transpose(new_points, [0, 1, 3, 2])

        new_points = tf.matmul(new_points, weight)

        new_points = tf_util.conv2d(new_points,
                                    mlp[-1],
                                    [1, new_points.get_shape()[2].value],
                                    padding='VALID',
                                    stride=[1, 1],
                                    bn=bn,
                                    is_training=is_training,
                                    scope='after_conv',
                                    bn_decay=bn_decay,
                                    weight_decay=weight_decay)

        new_points = tf.squeeze(new_points, [2])  # (batch_size, npoint, mlp[-1])

        return new_xyz, new_points
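
A minimal call sketch for this layer, assuming sample_xyz comes from an
external farthest-point-sampling step (names other than the layer and
pointconv_util.sampling are hypothetical):

sample_xyz = pointconv_util.sampling(1024, l0_xyz)  # FPS precomputed outside the layer
l1_xyz, l1_points = feature_encoding_layer_extra(l0_xyz, sample_xyz, l0_points,
                                                 npoint=1024,
                                                 radius=0.1, sigma=0.05, K=32,
                                                 mlp=[32, 32, 64],
                                                 akc_channel=16,
                                                 is_training=is_training_pl,
                                                 bn_decay=bn_decay,
                                                 weight_decay=1e-4,
                                                 scope='layer1')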