def get_edgeconv(point_cloud,
                 k,
                 mlp,
                 is_training,
                 bn_decay,
                 scope,
                 bn=True,
                 associated=None,
                 is_dist=False):
    with tf.variable_scope(scope) as sc:
        adj_matrix = tf_util.pairwise_distance(point_cloud)
        nn_idx = tf_util.knn(adj_matrix, k=k)

        # the kNN graph above is built on the original inputs; associated
        # features are appended afterwards, so they contribute to the edge
        # features but not to neighbour selection
        if associated is not None:
            for feature in associated:
                point_cloud = tf.concat([point_cloud, feature], axis=-1)

        edge_feature = tf_util.get_edge_feature(point_cloud,
                                                nn_idx=nn_idx,
                                                k=k)

        for i, num_out_channel in enumerate(mlp):
            edge_feature = tf_util.conv2d(edge_feature,
                                          num_out_channel, [1, 1],
                                          padding='VALID',
                                          stride=[1, 1],
                                          bn=bn,
                                          is_training=is_training,
                                          scope='conv%d' % (i),
                                          bn_decay=bn_decay,
                                          is_dist=is_dist)

        edge_feature = tf.reduce_max(edge_feature, axis=-2, keep_dims=True)

        return edge_feature
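
# Usage sketch (not part of the original listing): assumes TensorFlow 1.x,
# `import tensorflow as tf`, and the DGCNN-style tf_util module imported by
# this file; shapes and hyperparameters are illustrative only.
points = tf.placeholder(tf.float32, shape=(32, 1024, 3))        # B x N x 3
train_pl = tf.placeholder(tf.bool, shape=())
feat = get_edgeconv(points, k=20, mlp=[64, 64],
                    is_training=train_pl, bn_decay=0.9,
                    scope='edgeconv1')                           # B x N x 1 x 64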
Example #2
def get_model_old_3(point_cloud, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = point_cloud.get_shape()[0].value
    # point_cloud_2_momentum = tf.concat(
    #     [tf.multiply(point_cloud, point_cloud), tf.multiply(tf.roll(point_cloud, shift=1, axis=-1), point_cloud)],
    #     axis=-1)

    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    k = 20

    adj_matrix = tf_util.pairwise_distance(point_cloud)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(point_cloud, nn_idx=nn_idx, k=k)

    net = tf_util.conv2d(edge_feature, 128, [1, 1],
                         padding='VALID', stride=[1, 1],
                         bn=True, is_training=is_training,
                         scope='dgcnn1', bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)

    adj_matrix = tf_util.pairwise_distance(net)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)

    net = tf_util.conv2d(edge_feature, 256, [1, 1],
                         padding='VALID', stride=[1, 1],
                         bn=True, is_training=is_training,
                         scope='dgcnn2', bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)

    net = tf_util.conv2d(net, 1024, [1, 1],
                         padding='VALID', stride=[1, 1],
                         bn=True, is_training=is_training,
                         scope='agg', bn_decay=bn_decay)

    net = tf.reduce_max(net, axis=1, keep_dims=True)

    # MLP on global point cloud vector
    net = tf.reshape(net, [batch_size, -1])
    net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training,
                                  scope='fc1', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training,
                          scope='dp1')
    net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')

    return net, end_points
Example #3
def EdgeConv(features, edges, out_chs, k, is_training, dropout=0, weight_decay=0, scope='EdgeConv'):
    features = tf.nn.dropout(features, (1 - dropout * tf.cast(is_training, tf.float32)))

    if k == 0 or edges is None:
        edges = np.arange(features.get_shape().as_list()[1]).astype('int32')[None, :, None]

    net = tf_util.get_edge_feature(features, nn_idx=edges, k=k)

    net = tf_util.conv2d(net, out_chs, [1, 1],
                         padding='VALID', stride=[1, 1],
                         bn=False, is_training=is_training, weight_decay=weight_decay,
                         scope=scope + '_conv', is_dist=True)

    net = tf.reduce_sum(net, axis=-2, keep_dims=True)
    return net
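
# Usage sketch (illustrative only; assumes TF 1.x and the repo's tf_util). The
# `edges` argument is the B x N x k neighbour index tensor produced by
# tf_util.knn; `is_training` must be a tensor because it is cast to float for
# the dropout keep probability.
feats = tf.placeholder(tf.float32, shape=(8, 2048, 64))         # B x N x C
train_pl = tf.placeholder(tf.bool, shape=())
nn_idx = tf_util.knn(tf_util.pairwise_distance(feats), k=16)    # B x N x 16
out = EdgeConv(feats, nn_idx, out_chs=128, k=16,
               is_training=train_pl, dropout=0.2)               # B x N x 1 x 128
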
def get_edgeconv_groupconv(point_cloud,
                           k,
                           mlp,
                           is_training,
                           bn_decay,
                           scope,
                           bn=True,
                           associated=None):
    with tf.variable_scope(scope) as sc:
        adj_matrix = tf_util.pairwise_distance(point_cloud)
        nn_idx = tf_util.knn(adj_matrix, k=k)

        if associated is not None:
            for feature in associated:
                point_cloud = tf.concat([point_cloud, feature], axis=-1)

        net = tf_util.get_edge_feature(point_cloud, nn_idx=nn_idx, k=k)

        for i, num_out_channel in enumerate(mlp):
            center, edge_feature = tf.split(net, num_or_size_splits=2, axis=-1)

            center = tf_util.conv2d(center,
                                    num_out_channel, [1, 1],
                                    padding='VALID',
                                    stride=[1, 1],
                                    bn=bn,
                                    is_training=is_training,
                                    scope='centerconv%d' % (i),
                                    bn_decay=bn_decay)

            edge_feature = tf_util.conv2d(edge_feature,
                                          num_out_channel, [1, 1],
                                          padding='VALID',
                                          stride=[1, 1],
                                          bn=bn,
                                          is_training=is_training,
                                          scope='edgeconv%d' % (i),
                                          bn_decay=bn_decay)

            net = channle_shuffle(center, edge_feature)

        edge_feature = tf.reduce_max(net, axis=-2, keep_dims=True)

        return edge_feature
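
# Note: in the DGCNN-style tf_util, get_edge_feature concatenates the centre
# point's features with the (neighbour - centre) differences along the last
# axis, which is why the even split above separates "center" from "edge" on the
# first pass. `channle_shuffle` itself is defined elsewhere in this file; a
# rough, purely illustrative ShuffleNet-style interleaving of the two group
# outputs (each B x N x k x C) could look like this:
def channel_shuffle_sketch(center, edge_feature):
    stacked = tf.stack([center, edge_feature], axis=-1)    # B x N x k x C x 2
    shape = tf.shape(stacked)
    # interleave the two groups channel-wise -> B x N x k x 2C
    return tf.reshape(stacked, [shape[0], shape[1], shape[2], -1])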
Example #5
def edge_conv_layer(inputs,
                    neigh_idx,
                    nn,
                    k,
                    num_outputs,
                    scope=None,
                    is_training=None):
    '''
    EdgeConv layer:
      Wang, Yue, Yongbin Sun, Ziwei Liu, Sanjay E. Sarma, Michael M. Bronstein,
      and Justin M. Solomon. "Dynamic Graph CNN for Learning on Point Clouds."
      arXiv:1801.07829 (2018).
    '''
    edge_features = tf_util.get_edge_feature(inputs, neigh_idx, k)
    out = nn.build(edge_features,
                   num_outputs,
                   scope=scope,
                   is_training=is_training)
    vertex_features = tf.reduce_max(out, axis=-2, keep_dims=True)

    return vertex_features
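
# The `nn` argument is any object exposing build(edge_features, num_outputs,
# scope=..., is_training=...). A minimal, purely illustrative builder wrapping
# the repo's tf_util.conv2d (an assumption, not the class originally used with
# this layer) might look like this:
class SharedMLP(object):
    def build(self, inputs, num_outputs, scope=None, is_training=None):
        # shared 1x1 convolution over the edge features, as in a plain EdgeConv
        return tf_util.conv2d(inputs, num_outputs, [1, 1],
                              padding='VALID', stride=[1, 1],
                              bn=True, is_training=is_training,
                              scope=scope or 'edge_mlp')

# illustrative call (TF 1.x placeholders)
points = tf.placeholder(tf.float32, shape=(8, 1024, 3))
train_pl = tf.placeholder(tf.bool, shape=())
nn_idx = tf_util.knn(tf_util.pairwise_distance(points), k=20)
vertex_feats = edge_conv_layer(points, nn_idx, SharedMLP(), k=20,
                               num_outputs=64, scope='edge1',
                               is_training=train_pl)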
Example #6
def get_model(points,
              n_iter,
              is_training,
              bn_decay,
              randinit=False,
              nostop=False,
              k=10):
    T = tf.eye(4, batch_shape=(points.shape[0], ))
    T_deltas = []
    for i in range(n_iter):
        transformed_points = tf_util.transform_points(points, T)
        if not nostop:
            transformed_points = tf.stop_gradient(transformed_points)

        adj_matrix = tf_util.pairwise_distance(transformed_points)
        nn_idx = tf_util.knn(adj_matrix, k=k)
        edge_feature = tf_util.get_edge_feature(points, nn_idx=nn_idx, k=k)

        qt = input_transform_net(edge_feature, is_training, bn_decay, randinit)
        T_delta = tf.map_fn(tf_util.qt2mat, qt, dtype=tf.float32)
        T_deltas.append(T_delta)
        T = tf.matmul(T_delta, T)
    transformed_points = tf_util.transform_points(points, T)
    return transformed_points, T, T_deltas
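
# Sketch of driving the iterative alignment above (assumes TF 1.x and the
# repo's tf_util providing transform_points and qt2mat; shapes illustrative).
src = tf.placeholder(tf.float32, shape=(16, 1024, 3))
train_pl = tf.placeholder(tf.bool, shape=())
aligned, T, T_deltas = get_model(src, n_iter=3,
                                 is_training=train_pl, bn_decay=0.99)
# T is the accumulated 4x4 transform per batch element; T_deltas lists the
# per-iteration updates.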
Example #7
def get_model(point_cloud,
              is_training,
              bn_decay=None,
              weight_decay=None,
              classes=13):
    """ ConvNet baseline, input is BxNx9 gray image """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    input_image = tf.expand_dims(point_cloud, -1)

    k = 20

    adj = tf_util.pairwise_distance(point_cloud[:, :, 6:])
    nn_idx = tf_util.knn(adj, k=k)  # (batch, num_points, k)
    edge_feature = tf_util.get_edge_feature(input_image, nn_idx=nn_idx, k=k)

    out1 = tf_util.conv2d(edge_feature,
                          64, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn=True,
                          is_training=is_training,
                          weight_decay=weight_decay,
                          scope='adj_conv1',
                          bn_decay=bn_decay,
                          is_dist=True)

    out2 = tf_util.conv2d(out1,
                          64, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn=True,
                          is_training=is_training,
                          weight_decay=weight_decay,
                          scope='adj_conv2',
                          bn_decay=bn_decay,
                          is_dist=True)

    net_1 = tf.reduce_max(out2, axis=-2, keep_dims=True)

    adj = tf_util.pairwise_distance(net_1)
    nn_idx = tf_util.knn(adj, k=k)
    edge_feature = tf_util.get_edge_feature(net_1, nn_idx=nn_idx, k=k)

    out3 = tf_util.conv2d(edge_feature,
                          64, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn=True,
                          is_training=is_training,
                          weight_decay=weight_decay,
                          scope='adj_conv3',
                          bn_decay=bn_decay,
                          is_dist=True)

    out4 = tf_util.conv2d(out3,
                          64, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn=True,
                          is_training=is_training,
                          weight_decay=weight_decay,
                          scope='adj_conv4',
                          bn_decay=bn_decay,
                          is_dist=True)

    net_2 = tf.reduce_max(out4, axis=-2, keep_dims=True)

    adj = tf_util.pairwise_distance(net_2)
    nn_idx = tf_util.knn(adj, k=k)
    edge_feature = tf_util.get_edge_feature(net_2, nn_idx=nn_idx, k=k)

    out5 = tf_util.conv2d(edge_feature,
                          64, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn=True,
                          is_training=is_training,
                          weight_decay=weight_decay,
                          scope='adj_conv5',
                          bn_decay=bn_decay,
                          is_dist=True)

    # out6 = tf_util.conv2d(out5, 64, [1,1],
    #                      padding='VALID', stride=[1,1],
    #                      bn=True, is_training=is_training, weight_decay=weight_decay,
    #                      scope='adj_conv6', bn_decay=bn_decay, is_dist=True)

    net_3 = tf.reduce_max(out5, axis=-2, keep_dims=True)

    out7 = tf_util.conv2d(tf.concat([net_1, net_2, net_3], axis=-1),
                          1024, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn=True,
                          is_training=is_training,
                          scope='adj_conv7',
                          bn_decay=bn_decay,
                          is_dist=True)

    out_max = tf_util.max_pool2d(out7, [num_point, 1],
                                 padding='VALID',
                                 scope='maxpool')

    expand = tf.tile(out_max, [1, num_point, 1, 1])

    concat = tf.concat(axis=3, values=[expand, net_1, net_2, net_3])

    # CONV
    net = tf_util.conv2d(concat,
                         512, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='seg/conv1',
                         is_dist=True)
    net = tf_util.conv2d(net,
                         256, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='seg/conv2',
                         is_dist=True)
    net = tf_util.dropout(net,
                          keep_prob=0.7,
                          is_training=is_training,
                          scope='dp1')
    net = tf_util.conv2d(net,
                         classes, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         activation_fn=None,
                         scope='seg/conv3',
                         is_dist=True)
    net = tf.squeeze(net, [2])

    return net
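
# Minimal usage sketch for the segmentation head above (illustrative sizes;
# the 9 input channels follow the code, with the kNN graph built on
# channels 6:9).
pc = tf.placeholder(tf.float32, shape=(4, 4096, 9))
train_pl = tf.placeholder(tf.bool, shape=())
logits = get_model(pc, train_pl, classes=13)    # 4 x 4096 x 13 per-point logits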
Example #8
def forward(point_cloud, is_training, bn_decay=None):
    """LPD-Net:FNSF,    INPUT is batch_num_queries X num_pointclouds_per_query X num_points_per_pointcloud X 13,
                        OUTPUT batch_num_queries X num_pointclouds_per_query X output_dim """
    batch_num_queries = point_cloud.get_shape()[0].value
    num_pointclouds_per_query = point_cloud.get_shape()[1].value
    num_points = point_cloud.get_shape()[2].value
    CLUSTER_SIZE = 64
    OUTPUT_DIM = 256
    k = 20
    point_cloud = tf.reshape(point_cloud,
                             [batch_num_queries * num_pointclouds_per_query,
                              num_points, 13])

    point_cloud, feature_cloud = tf.split(point_cloud, [3, 10], 2)

    with tf.variable_scope('transform_net1') as sc:
        input_transform = input_transform_net(point_cloud, is_training, bn_decay, K=3)
    point_cloud_transformed = tf.matmul(point_cloud, input_transform)

    # Neural Network to learn neighborhood features
    # feature_cloud = neural_feature_net(point_cloud, is_training, bn_decay, knn_k=20, F=10)

    point_cloud_input = tf.concat([point_cloud_transformed, feature_cloud], 2)

    point_cloud_input = tf.expand_dims(point_cloud_input, -1)

    net = tf_util.conv2d(point_cloud_input, 64, [1, 13],
                         padding='VALID', stride=[1,1],
                         is_training=is_training,
                         scope='conv1', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 64, [1, 1],
                         padding='VALID', stride=[1,1],
                         is_training=is_training,
                         scope='conv2', bn_decay=bn_decay)

    with tf.variable_scope('transform_net2') as sc:
        feature_transform = feature_transform_net(net, is_training, bn_decay, K=64)
    feature_transform = tf.matmul(tf.squeeze(net, axis=[2]), feature_transform)

    # Serial structure
    # Dynamic Graph CNN for feature space
    with tf.variable_scope('DGfeature') as sc:
        adj_matrix = tf_util.pairwise_distance(feature_transform)
        nn_idx = tf_util.knn(adj_matrix, k=k)
        edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)

        net = tf_util.conv2d(edge_feature, 64, [1, 1],
                             padding='VALID', stride=[1, 1],
                             bn=True, is_training=is_training,
                             scope='dgmlp1', bn_decay=bn_decay)
        net = tf_util.conv2d(net, 64, [1, 1],
                             padding='VALID', stride=[1, 1],
                             bn=True, is_training=is_training,
                             scope='dgmlp2', bn_decay=bn_decay)
        net = tf.reduce_max(net, axis=-2, keep_dims=True)

    # Spatial neighborhood fusion for Cartesian space
    with tf.variable_scope('SNfeature') as sc:
        adj_matrix = tf_util.pairwise_distance(point_cloud)
        nn_idx = tf_util.knn(adj_matrix, k=k)

        idx_ = tf.range(batch_num_queries*num_pointclouds_per_query) * num_points
        idx_ = tf.reshape(idx_, [batch_num_queries*num_pointclouds_per_query, 1, 1])

        feature_cloud = tf.reshape(net, [-1, 64])
        edge_feature = tf.gather(feature_cloud, nn_idx+idx_)

        net = tf_util.conv2d(edge_feature, 64, [1, 1],
                             padding='VALID', stride=[1, 1],
                             bn=True, is_training=is_training,
                             scope='snmlp1', bn_decay=bn_decay)
        net = tf_util.conv2d(net, 64, [1, 1],
                             padding='VALID', stride=[1, 1],
                             bn=True, is_training=is_training,
                             scope='snmlp2', bn_decay=bn_decay)
        net = tf.reduce_max(net, axis=-2, keep_dims=True)

    # MLP for fusion
    net = tf_util.conv2d(net, 64, [1, 1],
                         padding='VALID', stride=[1,1],
                         is_training=is_training,
                         scope='conv3', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1, 1],
                         padding='VALID', stride=[1,1],
                         is_training=is_training,
                         scope='conv4', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 1024, [1, 1],
                         padding='VALID', stride=[1,1],
                         is_training=is_training,
                         scope='conv5', bn_decay=bn_decay)
    point_wise_feature = net

    NetVLAD = lp.NetVLAD(feature_size=1024, max_samples=num_points,
                         cluster_size=CLUSTER_SIZE, output_dim=OUTPUT_DIM,
                         gating=True, add_batch_norm=True,
                         is_training=is_training)

    net = tf.reshape(net, [-1, 1024])
    net = tf.nn.l2_normalize(net, 1)
    output = NetVLAD.forward(net)
    print(output)

    # normalize the descriptor to unit length
    output = tf.nn.l2_normalize(output, 1)
    output = tf.reshape(output,
                        [batch_num_queries, num_pointclouds_per_query, OUTPUT_DIM])

    return output
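
# Hedged usage sketch for the LPD-Net descriptor above (assumes the loupe-style
# module `lp` providing NetVLAD, plus the transform nets, are importable; the
# sizes are illustrative).
queries = tf.placeholder(tf.float32, shape=(2, 4, 4096, 13))
train_pl = tf.placeholder(tf.bool, shape=())
descriptors = forward(queries, train_pl)        # 2 x 4 x 256, L2-normalised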
Example #9
def get_model(point_cloud, filters, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    k = 20
    #print(batch_size, num_point)

    adj_matrix = tf_util.pairwise_distance(point_cloud)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(point_cloud, nn_idx=nn_idx, k=k)

    # print(edge_feature.shape)
    with tf.variable_scope('transform_net1', reuse=tf.AUTO_REUSE) as sc:
        transform = input_transform_net(
            edge_feature, is_training, bn_decay, K=3)

    point_cloud_transformed = tf.matmul(point_cloud, transform)
    adj_matrix = tf_util.pairwise_distance(point_cloud_transformed)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(
        point_cloud_transformed, nn_idx=nn_idx, k=k)

    net = tf_util.conv2d(edge_feature, 64, [1, 1],
                         padding='VALID', stride=[1, 1],
                         # bn=True, is_training=is_training,
                         bn=False,
                         scope='dgcnn1', bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net1 = net

    adj_matrix = tf_util.pairwise_distance(net)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)

    net = tf_util.conv2d(edge_feature, 64, [1, 1],
                         padding='VALID', stride=[1, 1],
                         # bn=True, is_training=is_training,
                         bn=False,
                         scope='dgcnn2', bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net2 = net

    adj_matrix = tf_util.pairwise_distance(net)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)

    net = tf_util.conv2d(edge_feature, 64, [1, 1],
                         padding='VALID', stride=[1, 1],
                         # bn=True, is_training=is_training,
                         bn=False,
                         scope='dgcnn3', bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net3 = net

    adj_matrix = tf_util.pairwise_distance(net)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)

    net = tf_util.conv2d(edge_feature, 128, [1, 1],
                         padding='VALID', stride=[1, 1],
                         # bn=True, is_training=is_training,
                         bn=False,
                         scope='dgcnn4', bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net4 = net

    net = tf_util.conv2d(tf.concat([net1, net2, net3, net4], axis=-1), 1024, [1, 1],
                         padding='VALID', stride=[1, 1],
                         # bn=True, is_training=is_training,
                         bn=False,
                         scope='agg', bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=1, keep_dims=True)

    # MLP on global point cloud vector
    net = tf.reshape(net, [batch_size, -1])
    # print(net)

    # 1 sum  shards' feature (except additional padding shards)
    # print(filters)
    net = tf.multiply(net, filters)   # remove additional padding shards
    net = tf.reduce_sum(net, 0, keep_dims=True)
    print(net)

    net = skip_dense(net, 1024, 10, 0.1, is_training)

    net = tf.contrib.layers.fully_connected(
        net, 5, activation_fn=None, scope='fc3')
    print("final net: ", net.shape)

    return net, end_points
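
# Hedged usage sketch for the shard-fusion classifier above: `filters` acts as
# a per-shard mask (an assumption based on the comments) that zeroes out padded
# shards before their features are summed into one descriptor.
shards = tf.placeholder(tf.float32, shape=(6, 1024, 3))    # shards x N x 3
mask = tf.placeholder(tf.float32, shape=(6, 1))            # 1 = real, 0 = padding
train_pl = tf.placeholder(tf.bool, shape=())
logits, _ = get_model(shards, mask, train_pl)              # logits for 5 classes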
Example #10
def get_model_my_model_1(point_cloud, input_label, is_training, cat_num, part_num, \
    batch_size, num_point, weight_decay, bn_decay=None):

    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    input_image = tf.expand_dims(point_cloud, -1)

    k = 30

    adj = tf_util.pairwise_distance(point_cloud)
    nn_idx = tf_util.knn(adj, k=k)
    edge_feature = tf_util.get_edge_feature(input_image, nn_idx=nn_idx, k=k)

    with tf.variable_scope('transform_net1') as sc:
        transform = input_transform_net(edge_feature,
                                        is_training,
                                        bn_decay,
                                        K=3,
                                        is_dist=True)
    point_cloud_transformed = tf.matmul(point_cloud, transform)

    #############################################################################################

    input_image = tf.expand_dims(point_cloud_transformed, -1)
    adj = tf_util.pairwise_distance(point_cloud_transformed)
    nn_idx = tf_util.knn(adj, k=k)
    edge_feature = tf_util.get_edge_feature(input_image, nn_idx=nn_idx, k=k)

    net_local1 = tf_util.conv2d(edge_feature,
                                64, [1, 1],
                                padding='VALID',
                                stride=[1, 1],
                                bn=True,
                                is_training=is_training,
                                weight_decay=weight_decay,
                                scope='adj_conv1',
                                bn_decay=bn_decay,
                                is_dist=True)

    net_local1_intermediate = net_local1

    net_local1 = tf_util.conv2d(net_local1,
                                64, [1, 1],
                                padding='VALID',
                                stride=[1, 1],
                                bn=True,
                                is_training=is_training,
                                weight_decay=weight_decay,
                                scope='dgcnn1r',
                                bn_decay=bn_decay,
                                is_dist=True,
                                activation_fn=None)

    net_local1 += net_local1_intermediate
    net_local1_ac = tf.nn.relu(net_local1)
    net_local1 = tf.reduce_max(net_local1_ac, axis=-2, keep_dims=True)

    net_global1 = tf_util.conv2d(input_image,
                                 64, [1, 3],
                                 padding='VALID',
                                 stride=[1, 1],
                                 bn=True,
                                 is_training=is_training,
                                 weight_decay=weight_decay,
                                 scope='conv1',
                                 bn_decay=bn_decay,
                                 is_dist=True)

    net_global1_intermediate = net_global1

    net_global1 = tf_util.conv2d(net_global1,
                                 64, [1, 1],
                                 padding='VALID',
                                 stride=[1, 1],
                                 bn=True,
                                 is_training=is_training,
                                 weight_decay=weight_decay,
                                 scope='conv1r',
                                 bn_decay=bn_decay,
                                 is_dist=True,
                                 activation_fn=None)

    net_global1 += net_global1_intermediate
    net_global1 = tf.nn.relu(net_global1)

    points_feat1_concat = tf.concat(axis=-1, values=[net_global1, net_local1])

    ##############################################################################################

    adj = tf_util.pairwise_distance(points_feat1_concat)
    nn_idx = tf_util.knn(adj, k=k)
    edge_feature = tf_util.get_edge_feature(points_feat1_concat,
                                            nn_idx=nn_idx,
                                            k=k)

    net_local2 = tf_util.conv2d(edge_feature,
                                64, [1, 1],
                                padding='VALID',
                                stride=[1, 1],
                                bn=True,
                                is_training=is_training,
                                weight_decay=weight_decay,
                                scope='adj_conv3',
                                bn_decay=bn_decay,
                                is_dist=True)

    net_local2 = tf_util.conv2d(net_local2,
                                64, [1, 1],
                                padding='VALID',
                                stride=[1, 1],
                                bn=True,
                                is_training=is_training,
                                weight_decay=weight_decay,
                                scope='dgcnn2r',
                                bn_decay=bn_decay,
                                is_dist=True,
                                activation_fn=None)

    net_local2 += net_local1_ac
    net_local2_ac = tf.nn.relu(net_local2)
    net_local2 = tf.reduce_max(net_local2_ac, axis=-2, keep_dims=True)

    net_global2 = tf_util.conv2d(points_feat1_concat,
                                 64, [1, 1],
                                 padding='VALID',
                                 stride=[1, 1],
                                 bn=True,
                                 is_training=is_training,
                                 weight_decay=weight_decay,
                                 scope='conv2',
                                 bn_decay=bn_decay,
                                 is_dist=True)

    net_global2 = tf_util.conv2d(net_global2,
                                 64, [1, 1],
                                 padding='VALID',
                                 stride=[1, 1],
                                 bn=True,
                                 is_training=is_training,
                                 weight_decay=weight_decay,
                                 scope='conv2r',
                                 bn_decay=bn_decay,
                                 is_dist=True,
                                 activation_fn=None)

    net_global2 += net_global1
    net_global2 = tf.nn.relu(net_global2)

    points_feat2_concat = tf.concat(axis=-1, values=[net_global2, net_local2])

    ##############################################################################################

    adj = tf_util.pairwise_distance(points_feat2_concat)
    nn_idx = tf_util.knn(adj, k=k)
    edge_feature = tf_util.get_edge_feature(points_feat2_concat,
                                            nn_idx=nn_idx,
                                            k=k)

    net_local3 = tf_util.conv2d(edge_feature,
                                64, [1, 1],
                                padding='VALID',
                                stride=[1, 1],
                                bn=True,
                                is_training=is_training,
                                weight_decay=weight_decay,
                                scope='adj_conv5',
                                bn_decay=bn_decay,
                                is_dist=True)

    net_local3 = tf_util.conv2d(net_local3,
                                64, [1, 1],
                                padding='VALID',
                                stride=[1, 1],
                                bn=True,
                                is_training=is_training,
                                weight_decay=weight_decay,
                                scope='dgcnn3r',
                                bn_decay=bn_decay,
                                is_dist=True,
                                activation_fn=None)

    net_local3 += net_local2_ac
    net_local3_ac = tf.nn.relu(net_local3)
    net_local3 = tf.reduce_max(net_local3_ac, axis=-2, keep_dims=True)

    net_global3 = tf_util.conv2d(points_feat2_concat,
                                 64, [1, 1],
                                 padding='VALID',
                                 stride=[1, 1],
                                 bn=True,
                                 is_training=is_training,
                                 weight_decay=weight_decay,
                                 scope='conv3',
                                 bn_decay=bn_decay,
                                 is_dist=True)

    net_global3 = tf_util.conv2d(net_global3,
                                 64, [1, 1],
                                 padding='VALID',
                                 stride=[1, 1],
                                 bn=True,
                                 is_training=is_training,
                                 weight_decay=weight_decay,
                                 scope='conv3r',
                                 bn_decay=bn_decay,
                                 is_dist=True,
                                 activation_fn=None)

    net_global3 += net_global2
    net_global3 = tf.nn.relu(net_global3)

    points_feat3_concat = tf.concat(axis=-1, values=[net_global3, net_local3])

    ##############################################################################################

    out7 = tf_util.conv2d(tf.concat(
        [points_feat1_concat, points_feat2_concat, points_feat3_concat],
        axis=-1),
                          1024, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn=True,
                          is_training=is_training,
                          scope='adj_conv7',
                          bn_decay=bn_decay,
                          is_dist=True)

    out_max = tf_util.max_pool2d(out7, [num_point, 1],
                                 padding='VALID',
                                 scope='maxpool')

    one_hot_label_expand = tf.reshape(input_label, [batch_size, 1, 1, cat_num])
    one_hot_label_expand = tf_util.conv2d(one_hot_label_expand,
                                          64, [1, 1],
                                          padding='VALID',
                                          stride=[1, 1],
                                          bn=True,
                                          is_training=is_training,
                                          scope='one_hot_label_expand',
                                          bn_decay=bn_decay,
                                          is_dist=True)
    out_max = tf.concat(axis=3, values=[out_max, one_hot_label_expand])
    expand = tf.tile(out_max, [1, num_point, 1, 1])

    concat = tf.concat(axis=3,
                       values=[
                           expand, points_feat1_concat, points_feat2_concat,
                           points_feat3_concat
                       ])

    net2 = tf_util.conv2d(concat,
                          256, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn_decay=bn_decay,
                          bn=True,
                          is_training=is_training,
                          scope='seg/conv1',
                          weight_decay=weight_decay,
                          is_dist=True)
    net2 = tf_util.dropout(net2,
                           keep_prob=0.6,
                           is_training=is_training,
                           scope='seg/dp1')
    net2 = tf_util.conv2d(net2,
                          256, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn_decay=bn_decay,
                          bn=True,
                          is_training=is_training,
                          scope='seg/conv2',
                          weight_decay=weight_decay,
                          is_dist=True)
    net2 = tf_util.dropout(net2,
                           keep_prob=0.6,
                           is_training=is_training,
                           scope='seg/dp2')
    net2 = tf_util.conv2d(net2,
                          128, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn_decay=bn_decay,
                          bn=True,
                          is_training=is_training,
                          scope='seg/conv3',
                          weight_decay=weight_decay,
                          is_dist=True)
    net2 = tf_util.conv2d(net2,
                          part_num, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          activation_fn=None,
                          bn=False,
                          scope='seg/conv4',
                          weight_decay=weight_decay,
                          is_dist=True)

    net2 = tf.reshape(net2, [batch_size, num_point, part_num])

    return net2
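
# Illustrative call for the part-segmentation network above (the category and
# part counts follow the usual ShapeNet-Part setup and are assumptions here).
pc = tf.placeholder(tf.float32, shape=(8, 2048, 3))
one_hot = tf.placeholder(tf.float32, shape=(8, 16))        # object category
train_pl = tf.placeholder(tf.bool, shape=())
seg_logits = get_model_my_model_1(pc, one_hot, train_pl,
                                  cat_num=16, part_num=50,
                                  batch_size=8, num_point=2048,
                                  weight_decay=1e-4)        # 8 x 2048 x 50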
Example #11
def model_part(point_cloud, is_training, k, bn_decay=None):
      out1 = tf_util.conv2d(point_cloud, 64, [1,1],
                           padding='VALID', stride=[1,1],
                           bn=True, is_training=is_training,
                           scope='samp_conv1', bn_decay=bn_decay, is_dist=True)

      out2 = tf_util.conv2d(out1, 64, [1,1],
                           padding='VALID', stride=[1,1],
                           bn=True, is_training=is_training,
                           scope='samp_conv2', bn_decay=bn_decay, is_dist=True)

      net_max_1 = tf.reduce_max(out2, axis=-2, keep_dims=True)
      net_mean_1 = tf.reduce_mean(out2, axis=-2, keep_dims=True)

      out3 = tf_util.conv2d(tf.concat([net_max_1, net_mean_1], axis=-1), 64, [1,1],
                           padding='VALID', stride=[2,1],
                           bn=True, is_training=is_training,
                           scope='samp_conv3', bn_decay=bn_decay, is_dist=True)
      print("out3 = ", out3.shape)
      out3_max = tf.reduce_max(out3, axis=1, keep_dims=True)
      print("out3_max = ", out3_max.shape)
      adj = tf_util.pairwise_distance(tf.squeeze(out3, axis=-2))
      nn_idx = tf_util.knn(adj, k=k)
      edge_feature = tf_util.get_edge_feature(out3, nn_idx=nn_idx, k=k)

      out4 = tf_util.conv2d(edge_feature, 64, [1,1],
                           padding='VALID', stride=[1,1],
                           bn=True, is_training=is_training,
                           scope='samp_conv4', bn_decay=bn_decay, is_dist=True)

      net_max_2 = tf.reduce_max(out4, axis=-2, keep_dims=True)
      net_mean_2 = tf.reduce_mean(out4, axis=-2, keep_dims=True)

      out5 = tf_util.conv2d(tf.concat([net_max_2, net_mean_2], axis=-1), 64, [1,1],
                           padding='VALID', stride=[2,1],
                           bn=True, is_training=is_training,
                           scope='samp_conv5', bn_decay=bn_decay, is_dist=True)
      out5_max = tf.reduce_max(out5, axis=1, keep_dims=True)

      adj = tf_util.pairwise_distance(tf.squeeze(out5, axis=-2))
      nn_idx = tf_util.knn(adj, k=k)
      edge_feature = tf_util.get_edge_feature(out5, nn_idx=nn_idx, k=k)

      out6 = tf_util.conv2d(edge_feature, 64, [1,1],
                           padding='VALID', stride=[1,1],
                           bn=True, is_training=is_training,
                           scope='samp_conv6', bn_decay=bn_decay, is_dist=True)

      net_max_3 = tf.reduce_max(out6, axis=-2, keep_dims=True)
      net_mean_3 = tf.reduce_mean(out6, axis=-2, keep_dims=True)

      out7 = tf_util.conv2d(tf.concat([net_max_3, net_mean_3], axis=-1), 64, [1,1],
                           padding='VALID', stride=[2,1],
                           bn=True, is_training=is_training,
                           scope='samp_conv7', bn_decay=bn_decay, is_dist=True)
      out7_max = tf.reduce_max(out7, axis=1, keep_dims=True)

      out8 = tf_util.conv2d(out7, 1024, [1, 1],
                           padding='VALID', stride=[1,1],
                           bn=True, is_training=is_training,
                           scope='samp_conv8', bn_decay=bn_decay, is_dist=True)
      print("out8 = ", out8.shape)
      out_max = tf.reduce_max(out8, axis=1, keep_dims=True)
      print("out_max = ", out_max.shape)
      return out3_max, out5_max, out7_max, out_max
Example #12
def get_model(point_cloud, input_label, is_training, cat_num, part_num, \
              batch_size, num_point, weight_decay=.00004, bn_decay=None):
    bn_decay = bn_decay if bn_decay is not None else 0.9

    with tf.variable_scope("DGCNN"):
        batch_size = point_cloud.get_shape()[0].value
        num_point = point_cloud.get_shape()[1].value
        input_image = tf.expand_dims(point_cloud, -1)

        k = 20
        bn_params = {
            "is_training": is_training,
            "decay": bn_decay,
            'renorm': True
        }

        adj = tf_util.pairwise_distance(point_cloud)
        nn_idx = tf_util.knn(adj, k=k)
        edge_feature = tf_util.get_edge_feature(input_image,
                                                nn_idx=nn_idx,
                                                k=k)

        with tf.variable_scope('transform_net1') as sc:
            transform = input_transform_net(edge_feature,
                                            is_training,
                                            bn_decay,
                                            K=3)
        point_cloud_transformed = tf.matmul(point_cloud, transform)

        input_image = tf.expand_dims(point_cloud_transformed, -1)
        adj = tf_util.pairwise_distance(point_cloud_transformed)
        nn_idx = tf_util.knn(adj, k=k)
        edge_feature = tf_util.get_edge_feature(input_image,
                                                nn_idx=nn_idx,
                                                k=k)

        # out1 = tf_util.conv2d(edge_feature, 64, [1,1],
        #                      padding='VALID', stride=[1,1],
        #                      bn=True, is_training=is_training, weight_decay=weight_decay,
        #                      scope='adj_conv1', bn_decay=bn_decay, is_dist=True)
        out1 = layers.masked_conv2d(
            edge_feature,
            64,
            # max(int(round(64 * scale)), 32),
            [1, 1],
            padding='VALID',
            stride=1,
            normalizer_fn=slim.batch_norm,
            normalizer_params=bn_params,
            biases_initializer=tf.contrib.layers.xavier_initializer(),
            weights_regularizer=slim.l2_regularizer(weight_decay),
            activation_fn=tf.nn.relu6,
            weights_initializer=tf.contrib.layers.xavier_initializer(),
            scope='adj_conv1')

        # out2 = tf_util.conv2d(out1, 64, [1,1],
        #                      padding='VALID', stride=[1,1],
        #                      bn=True, is_training=is_training, weight_decay=weight_decay,
        #                      scope='adj_conv2', bn_decay=bn_decay, is_dist=True)
        out2 = layers.masked_conv2d(
            out1,
            64,
            # max(int(round(64 * scale)), 32),
            [1, 1],
            padding='VALID',
            stride=1,
            normalizer_fn=slim.batch_norm,
            normalizer_params=bn_params,
            biases_initializer=tf.contrib.layers.xavier_initializer(),
            weights_regularizer=slim.l2_regularizer(weight_decay),
            activation_fn=tf.nn.relu6,
            weights_initializer=tf.contrib.layers.xavier_initializer(),
            scope='adj_conv2')

        net_1 = tf.reduce_max(out2, axis=-2, keep_dims=True)

        adj = tf_util.pairwise_distance(net_1)
        nn_idx = tf_util.knn(adj, k=k)
        edge_feature = tf_util.get_edge_feature(net_1, nn_idx=nn_idx, k=k)

        # out3 = tf_util.conv2d(edge_feature, 64, [1,1],
        #                      padding='VALID', stride=[1,1],
        #                      bn=True, is_training=is_training, weight_decay=weight_decay,
        #                      scope='adj_conv3', bn_decay=bn_decay, is_dist=True)
        out3 = layers.masked_conv2d(
            edge_feature,
            64,
            # max(int(round(64 * scale)), 32),
            [1, 1],
            padding='VALID',
            stride=1,
            normalizer_fn=slim.batch_norm,
            normalizer_params=bn_params,
            biases_initializer=tf.contrib.layers.xavier_initializer(),
            weights_regularizer=slim.l2_regularizer(weight_decay),
            activation_fn=tf.nn.relu6,
            weights_initializer=tf.contrib.layers.xavier_initializer(),
            scope='adj_conv3')

        # out4 = tf_util.conv2d(out3, 64, [1,1],
        #                      padding='VALID', stride=[1,1],
        #                      bn=True, is_training=is_training, weight_decay=weight_decay,
        #                      scope='adj_conv4', bn_decay=bn_decay, is_dist=True)
        out4 = layers.masked_conv2d(
            out3,
            64,
            # max(int(round(64 * scale)), 32),
            [1, 1],
            padding='VALID',
            stride=1,
            normalizer_fn=slim.batch_norm,
            normalizer_params=bn_params,
            biases_initializer=tf.contrib.layers.xavier_initializer(),
            weights_regularizer=slim.l2_regularizer(weight_decay),
            activation_fn=tf.nn.relu6,
            weights_initializer=tf.contrib.layers.xavier_initializer(),
            scope='adj_conv4')

        net_2 = tf.reduce_max(out4, axis=-2, keep_dims=True)

        adj = tf_util.pairwise_distance(net_2)
        nn_idx = tf_util.knn(adj, k=k)
        edge_feature = tf_util.get_edge_feature(net_2, nn_idx=nn_idx, k=k)

        # out5 = tf_util.conv2d(edge_feature, 64, [1,1],
        #                      padding='VALID', stride=[1,1],
        #                      bn=True, is_training=is_training, weight_decay=weight_decay,
        #                      scope='adj_conv5', bn_decay=bn_decay, is_dist=True)
        out5 = layers.masked_conv2d(
            edge_feature,
            64,
            # max(int(round(64 * scale)), 32),
            [1, 1],
            padding='VALID',
            stride=1,
            normalizer_fn=slim.batch_norm,
            normalizer_params=bn_params,
            biases_initializer=tf.contrib.layers.xavier_initializer(),
            weights_regularizer=slim.l2_regularizer(weight_decay),
            activation_fn=tf.nn.relu6,
            weights_initializer=tf.contrib.layers.xavier_initializer(),
            scope='adj_conv5')

        # out6 = tf_util.conv2d(out5, 64, [1,1],
        #                      padding='VALID', stride=[1,1],
        #                      bn=True, is_training=is_training, weight_decay=weight_decay,
        #                      scope='adj_conv6', bn_decay=bn_decay, is_dist=True)

        net_3 = tf.reduce_max(out5, axis=-2, keep_dims=True)

        # out7 = tf_util.conv2d(tf.concat([net_1, net_2, net_3], axis=-1), 1024, [1, 1],
        #                      padding='VALID', stride=[1,1],
        #                      bn=True, is_training=is_training,
        #                      scope='adj_conv7', bn_decay=bn_decay, is_dist=True)
        out7 = layers.masked_conv2d(
            tf.concat([net_1, net_2, net_3], axis=-1),
            1024,
            # max(int(round(64 * scale)), 32),
            [1, 1],
            padding='VALID',
            stride=1,
            normalizer_fn=slim.batch_norm,
            normalizer_params=bn_params,
            biases_initializer=tf.contrib.layers.xavier_initializer(),
            weights_regularizer=slim.l2_regularizer(weight_decay),
            activation_fn=tf.nn.relu6,
            weights_initializer=tf.contrib.layers.xavier_initializer(),
            scope='adj_conv7')

        # out_max = tf_util.max_pool2d(out7, [num_point, 1], padding='VALID', scope='maxpool')
        out_max = slim.max_pool2d(out7, [num_point, 1],
                                  stride=1,
                                  padding='VALID',
                                  scope='maxpool')

        one_hot_label_expand = tf.reshape(input_label,
                                          [batch_size, 1, 1, cat_num])
        # one_hot_label_expand = tf_util.conv2d(one_hot_label_expand, 64, [1, 1],
        #                      padding='VALID', stride=[1,1],
        #                      bn=True, is_training=is_training,
        #                      scope='one_hot_label_expand', bn_decay=bn_decay, is_dist=True)
        one_hot_label_expand = layers.masked_conv2d(
            one_hot_label_expand,
            64,
            # max(int(round(64 * scale)), 32),
            [1, 1],
            padding='VALID',
            stride=1,
            normalizer_fn=slim.batch_norm,
            normalizer_params=bn_params,
            biases_initializer=tf.contrib.layers.xavier_initializer(),
            weights_regularizer=slim.l2_regularizer(weight_decay),
            activation_fn=tf.nn.relu6,
            weights_initializer=tf.contrib.layers.xavier_initializer(),
            scope='one_hot_label_expand')
        out_max = tf.concat(axis=3, values=[out_max, one_hot_label_expand])
        expand = tf.tile(out_max, [1, num_point, 1, 1])

        concat = tf.concat(axis=3, values=[expand, net_1, net_2, net_3])

        # net2 = tf_util.conv2d(concat, 256, [1,1], padding='VALID', stride=[1,1], bn_decay=bn_decay,
        #           bn=True, is_training=is_training, scope='seg/conv1', weight_decay=weight_decay, is_dist=True)
        net2 = layers.masked_conv2d(
            concat,
            256,
            # max(int(round(64 * scale)), 32),
            [1, 1],
            padding='VALID',
            stride=1,
            normalizer_fn=slim.batch_norm,
            normalizer_params=bn_params,
            biases_initializer=tf.contrib.layers.xavier_initializer(),
            weights_regularizer=slim.l2_regularizer(weight_decay),
            activation_fn=tf.nn.relu6,
            weights_initializer=tf.contrib.layers.xavier_initializer(),
            scope='seg/conv1')
        # net2 = tf_util.dropout(net2, keep_prob=0.6, is_training=is_training, scope='seg/dp1')
        net2 = slim.dropout(net2,
                            keep_prob=0.6,
                            is_training=is_training,
                            scope='seg/dp1')
        # net2 = tf_util.conv2d(net2, 256, [1,1], padding='VALID', stride=[1,1], bn_decay=bn_decay,
        #           bn=True, is_training=is_training, scope='seg/conv2', weight_decay=weight_decay, is_dist=True)
        net2 = layers.masked_conv2d(
            net2,
            256,
            # max(int(round(64 * scale)), 32),
            [1, 1],
            padding='VALID',
            stride=1,
            normalizer_fn=slim.batch_norm,
            normalizer_params=bn_params,
            biases_initializer=tf.contrib.layers.xavier_initializer(),
            weights_regularizer=slim.l2_regularizer(weight_decay),
            activation_fn=tf.nn.relu6,
            weights_initializer=tf.contrib.layers.xavier_initializer(),
            scope='seg/conv2')
        # net2 = tf_util.dropout(net2, keep_prob=0.6, is_training=is_training, scope='seg/dp2')
        net2 = slim.dropout(net2,
                            keep_prob=0.6,
                            is_training=is_training,
                            scope='seg/dp2')
        # net2 = tf_util.conv2d(net2, 128, [1,1], padding='VALID', stride=[1,1], bn_decay=bn_decay,
        #           bn=True, is_training=is_training, scope='seg/conv3', weight_decay=weight_decay, is_dist=True)
        net2 = layers.masked_conv2d(
            net2,
            128,
            # max(int(round(64 * scale)), 32),
            [1, 1],
            padding='VALID',
            stride=1,
            normalizer_fn=slim.batch_norm,
            normalizer_params=bn_params,
            biases_initializer=tf.contrib.layers.xavier_initializer(),
            weights_regularizer=slim.l2_regularizer(weight_decay),
            activation_fn=tf.nn.relu6,
            weights_initializer=tf.contrib.layers.xavier_initializer(),
            scope='seg/conv3')
        # net2 = tf_util.conv2d(net2, part_num, [1,1], padding='VALID', stride=[1,1], activation_fn=None,
        #           bn=False, scope='seg/conv4', weight_decay=weight_decay, is_dist=True)
        net2 = layers.masked_conv2d(
            net2,
            part_num,
            # max(int(round(64 * scale)), 32),
            [1, 1],
            padding='VALID',
            stride=1,
            normalizer_fn=None,
            # normalizer_params=bn_params,
            biases_initializer=tf.contrib.layers.xavier_initializer(),
            weights_regularizer=slim.l2_regularizer(weight_decay),
            activation_fn=None,
            weights_initializer=tf.contrib.layers.xavier_initializer(),
            scope='seg/conv4')

        net2 = tf.reshape(net2, [batch_size, num_point, part_num])

    return net2
Example #13
def get_model(point_cloud, is_training, num_classes, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = point_cloud.get_shape()[0].value
    # num_point = point_cloud.get_shape()[1].value
    end_points = {}
    k = 20

    # pairwise distance of the points in the point cloud
    adj_matrix = tf_util.pairwise_distance(point_cloud)

    # get indices of k nearest neighbors
    nn_idx = tf_util.knn(adj_matrix, k=k)

    # edge feature
    edge_feature = tf_util.get_edge_feature(point_cloud, nn_idx=nn_idx, k=k)

    # transform net 1
    with tf.variable_scope('transform_net1') as _:
        transform = input_transform_net(edge_feature,
                                        is_training,
                                        bn_decay,
                                        K=3)

    # point cloud transf
    point_cloud_transformed = tf.matmul(point_cloud, transform)

    # pairwise distance of the points in the point cloud
    adj_matrix = tf_util.pairwise_distance(point_cloud_transformed)

    # get indices of k nearest neighbors
    nn_idx = tf_util.knn(adj_matrix, k=k)

    # gather edge features for the k nearest neighbours of the transformed cloud
    edge_feature = tf_util.get_edge_feature(point_cloud_transformed,
                                            nn_idx=nn_idx,
                                            k=k)

    #  net = tf_util.conv2d_reg(point_cloud_transformed, nn_idx,
    net = tf_util.conv2d(edge_feature,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='dgcnn1',
                         bn_decay=bn_decay)

    # Maxpool
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net_1 = net

    #############################################################################
    # 2nd block
    #############################################################################

    adj_matrix = tf_util.pairwise_distance(net)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)

    net = tf_util.conv2d(edge_feature,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='dgcnn2',
                         bn_decay=bn_decay)
    # net = tf.reduce_max(net, axis=-2, keep_dims=False)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net_2 = net

    #############################################################################
    # 3rd block
    #############################################################################

    adj_matrix = tf_util.pairwise_distance(net)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)

    net = tf_util.conv2d(edge_feature,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='dgcnn3',
                         bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net_3 = net

    #############################################################################
    # 4th block
    #############################################################################

    adj_matrix = tf_util.pairwise_distance(net)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)

    net = tf_util.conv2d(edge_feature,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='dgcnn4',
                         bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net_4 = net

    #############################################################################
    # aggregate block
    #############################################################################

    net = tf.concat([net_1, net_2, net_3, net_4], axis=-1)
    net = tf_util.conv2d(net,
                         1024, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='agg',
                         bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=1, keep_dims=True)

    # MLP on global point cloud vector
    net = tf.reshape(net, [batch_size, -1])
    net = tf_util.fully_connected(net,
                                  512,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc1',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.5,
                          is_training=is_training,
                          scope='dp1')
    net = tf_util.fully_connected(net,
                                  256,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc2',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.5,
                          is_training=is_training,
                          scope='dp2')
    net = tf_util.fully_connected(net,
                                  num_classes,
                                  activation_fn=None,
                                  scope='fc3')

    return net, end_points
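
# Usage sketch for the DGCNN classifier above (TF 1.x; illustrative shapes).
pc = tf.placeholder(tf.float32, shape=(32, 1024, 3))
train_pl = tf.placeholder(tf.bool, shape=())
logits, end_points = get_model(pc, train_pl, num_classes=40)    # 32 x 40 logits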
Example #14
    def get_model_w_ae_gcn(self, point_cloud, is_training, bn_decay=None):
        """ Classification PointNet, input is BxNx3, output Bx40 """
        # imp.load_source registers these modules in sys.modules under the
        # given names, so the plain imports below pick up exactly these
        # repo-specific files rather than anything else on sys.path.
        tf_util = imp.load_source(
            'tf_util',
            os.path.join(os.path.dirname(self.models["test"]), '../utils',
                         "tf_util.py"))
        transform_nets = imp.load_source(
            'transform_nets',
            os.path.join(os.path.dirname(self.models["test"]),
                         "transform_nets.py"))
        import tf_util
        from transform_nets import input_transform_net
        batch_size = self.configuration.batch_size
        num_point = self.configuration.n_input[0]
        end_points = {}
        k = 20

        adj_matrix = tf_util.pairwise_distance(point_cloud)
        nn_idx = tf_util.knn(adj_matrix, k=k)
        edge_feature = tf_util.get_edge_feature(point_cloud,
                                                nn_idx=nn_idx,
                                                k=k)
        print(adj_matrix, nn_idx, edge_feature)
        with tf.variable_scope('transform_net1') as sc:
            transform = input_transform_net(edge_feature,
                                            is_training,
                                            bn_decay,
                                            K=3)

        point_cloud_transformed = tf.matmul(point_cloud, transform)
        adj_matrix = tf_util.pairwise_distance(point_cloud_transformed)
        nn_idx = tf_util.knn(adj_matrix, k=k)
        edge_feature = tf_util.get_edge_feature(point_cloud_transformed,
                                                nn_idx=nn_idx,
                                                k=k)

        net = tf_util.conv2d(edge_feature,
                             64, [1, 1],
                             padding='VALID',
                             stride=[1, 1],
                             bn=True,
                             is_training=is_training,
                             scope='dgcnn1',
                             bn_decay=bn_decay)
        net = tf.reduce_max(net, axis=-2, keep_dims=True)
        net1 = net

        adj_matrix = tf_util.pairwise_distance(net)
        nn_idx = tf_util.knn(adj_matrix, k=k)
        edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)

        net = tf_util.conv2d(edge_feature,
                             64, [1, 1],
                             padding='VALID',
                             stride=[1, 1],
                             bn=True,
                             is_training=is_training,
                             scope='dgcnn2',
                             bn_decay=bn_decay)
        net = tf.reduce_max(net, axis=-2, keep_dims=True)
        net2 = net

        adj_matrix = tf_util.pairwise_distance(net)
        nn_idx = tf_util.knn(adj_matrix, k=k)
        edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)

        net = tf_util.conv2d(edge_feature,
                             64, [1, 1],
                             padding='VALID',
                             stride=[1, 1],
                             bn=True,
                             is_training=is_training,
                             scope='dgcnn3',
                             bn_decay=bn_decay)
        net = tf.reduce_max(net, axis=-2, keep_dims=True)
        net3 = net

        adj_matrix = tf_util.pairwise_distance(net)
        nn_idx = tf_util.knn(adj_matrix, k=k)
        edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)

        net = tf_util.conv2d(edge_feature,
                             128, [1, 1],
                             padding='VALID',
                             stride=[1, 1],
                             bn=True,
                             is_training=is_training,
                             scope='dgcnn4',
                             bn_decay=bn_decay)
        net = tf.reduce_max(net, axis=-2, keep_dims=True)
        net4 = net

        net = tf_util.conv2d(tf.concat([net1, net2, net3, net4], axis=-1),
                             1024, [1, 1],
                             padding='VALID',
                             stride=[1, 1],
                             bn=True,
                             is_training=is_training,
                             scope='agg',
                             bn_decay=bn_decay)

        net = tf.reduce_max(net, axis=1, keep_dims=True)

        # MLP on global point cloud vector
        net = tf.reshape(net, [batch_size, -1])
        end_points['post_max'] = net
        net = tf_util.fully_connected(net,
                                      512,
                                      bn=True,
                                      is_training=is_training,
                                      scope='fc1',
                                      bn_decay=bn_decay)
        net = tf_util.dropout(net,
                              keep_prob=0.5,
                              is_training=is_training,
                              scope='dp1')
        net = tf_util.fully_connected(net,
                                      256,
                                      bn=True,
                                      is_training=is_training,
                                      scope='fc2',
                                      bn_decay=bn_decay)
        net = tf_util.dropout(net,
                              keep_prob=0.5,
                              is_training=is_training,
                              scope='dp2')
        end_points['final'] = net
        net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')

        return net, end_points
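# A minimal sketch of what tf_util.get_edge_feature is assumed to produce in the
# method above: for every point x_i and each of its k neighbors x_j it concatenates
# (x_i, x_j - x_i), giving a B x N x k x 2C tensor. Simplified illustration only;
# the function name is not part of tf_util.
import tensorflow as tf

def get_edge_feature_sketch(point_cloud, nn_idx, k):
    """point_cloud: B x N x C, nn_idx: B x N x k -> edge features B x N x k x 2C."""
    batch_size = point_cloud.get_shape()[0].value
    num_points = point_cloud.get_shape()[1].value
    num_dims = point_cloud.get_shape()[2].value

    # Offset neighbor indices so one gather over the flattened batch works.
    idx_base = tf.reshape(tf.range(batch_size) * num_points, [batch_size, 1, 1])
    flat = tf.reshape(point_cloud, [-1, num_dims])
    neighbors = tf.gather(flat, nn_idx + idx_base)                    # B x N x k x C

    central = tf.tile(tf.expand_dims(point_cloud, -2), [1, 1, k, 1])  # B x N x k x C
    return tf.concat([central, neighbors - central], axis=-1)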
Example #15
def get_model(input_tensor, is_training, bn_decay=None):
    weight_decay = 0.0
    num_point = input_tensor.get_shape()[1].value

    k = 40


    # k-NN graph and edge features for the transform net input
    adj_matrix = tf_util.pairwise_distance(input_tensor)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(input_tensor, nn_idx=nn_idx, k=k)


    # Transform Net
    with tf.variable_scope('transform_net1') as sc:
        transform = input_transform_net(edge_feature, is_training, bn_decay,
                                        K=input_tensor.get_shape()[2].value, is_dist=True)
    input_tensor_transformed = tf.matmul(input_tensor, transform)
    adj_matrix = tf_util.pairwise_distance(input_tensor_transformed)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(input_tensor_transformed, nn_idx=nn_idx, k=k)

    out1_1 = tf_util.conv2d(edge_feature, 64, [1,1],
                       padding='VALID', stride=[1,1],
                       bn=True, is_training=is_training, weight_decay=weight_decay,
                       scope='one/adj_conv1', bn_decay=bn_decay, is_dist=True)

    
    out1_2 = tf_util.conv2d(out1_1, 64, [1,1],
                        padding='VALID', stride=[1,1],
                        bn=True, is_training=is_training, weight_decay=weight_decay,
                        scope='one/adj_conv2', bn_decay=bn_decay, is_dist=True)

        
    out1_3 = tf_util.conv2d(out1_2, 64, [1,1],
                        padding='VALID', stride=[1,1],
                        bn=True, is_training=is_training, weight_decay=weight_decay,
                        scope='one/adj_conv3', bn_decay=bn_decay, is_dist=True)

    net_1 = tf.reduce_max(out1_3, axis=-2, keepdims=True)



    adj = tf_util.pairwise_distance(net_1)
    nn_idx = tf_util.knn(adj, k=k)
    edge_feature = tf_util.get_edge_feature(net_1, nn_idx=nn_idx, k=k)

    out2_1 = tf_util.conv2d(edge_feature, 64, [1,1],
                        padding='VALID', stride=[1,1],
                        bn=True, is_training=is_training, weight_decay=weight_decay,
                        scope='two/adj_conv1', bn_decay=bn_decay, is_dist=True)

    out2_2 = tf_util.conv2d(out2_1, 64, [1,1],
                        padding='VALID', stride=[1,1],
                        bn=True, is_training=is_training, weight_decay=weight_decay,
                        scope='two/adj_conv2', bn_decay=bn_decay, is_dist=True)

    out2_3 = tf_util.conv2d(out2_2, 64, [1,1],
                            padding='VALID', stride=[1,1],
                            bn=True, is_training=is_training, weight_decay=weight_decay,
                            scope='two/adj_conv3', bn_decay=bn_decay, is_dist=True)
                            
    net_2 = tf.reduce_max(out2_3, axis=-2, keepdims=True)

      

    adj = tf_util.pairwise_distance(net_2)
    nn_idx = tf_util.knn(adj, k=k)
    edge_feature = tf_util.get_edge_feature(net_2, nn_idx=nn_idx, k=k)

    out3_1 = tf_util.conv2d(edge_feature, 64, [1,1],
                        padding='VALID', stride=[1,1],
                        bn=True, is_training=is_training, weight_decay=weight_decay,
                        scope='three/adj_conv1', bn_decay=bn_decay, is_dist=True)


    out3_2 = tf_util.conv2d(out3_1, 64, [1,1],
                        padding='VALID', stride=[1,1],
                        bn=True, is_training=is_training, weight_decay=weight_decay,
                        scope='three/adj_conv2', bn_decay=bn_decay, is_dist=True)


    net_3 = tf.reduce_max(out3_2, axis=-2, keepdims=True)



    out7 = tf_util.conv2d(tf.concat([net_1, net_2, net_3], axis=-1), 1024, [1, 1], 
                        padding='VALID', stride=[1,1],
                        bn=True, is_training=is_training,
                        scope='adj_conv7', bn_decay=bn_decay, is_dist=True)

    out_max = tf_util.max_pool2d(out7, [num_point, 1], padding='VALID', scope='maxpool')


    expand = tf.tile(out_max, [1, num_point, 1, 1])

    concat = tf.concat(axis=3, values=[expand, 
                                        net_1,
                                        net_2,
                                        net_3])

    # CONV 
    net = tf_util.conv2d(concat, 512, [1,1], padding='VALID', stride=[1,1],
                bn=True, is_training=is_training, scope='seg/conv1', is_dist=True)
    # net = tf_util.conv2d(net, 256, [1,1], padding='VALID', stride=[1,1],
    #             bn=True, is_training=is_training, scope='seg/conv2', is_dist=True)
    # net = tf_util.conv2d(net, 128, [1,1], padding='VALID', stride=[1,1],
    #             bn=True, is_training=is_training, scope='seg/conv3', is_dist=True)
    # net = tf_util.conv2d(net, 64, [1,1], padding='VALID', stride=[1,1],
    #             bn=True, is_training=is_training, scope='seg/conv4', is_dist=True)
    # net = tf_util.conv2d(net, 32, [1,1], padding='VALID', stride=[1,1],
    #             bn=True, is_training=is_training, scope='seg/conv5', is_dist=True)    
    
    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp1')
    
    net = tf_util.conv2d(net, 16, [1,1], padding='VALID', stride=[1,1],
                activation_fn=None, scope='seg/output', is_dist=True)


    net = tf.squeeze(net, [2])


    return net
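# Hedged usage sketch for the segmentation model above, assuming a plain 3-channel
# (XYZ) input; placeholder sizes are illustrative. The output is per-point logits
# of shape B x N x 16, matching the 'seg/output' layer.
import tensorflow as tf

batch_size, num_point = 8, 4096
input_pl = tf.placeholder(tf.float32, shape=(batch_size, num_point, 3))
is_training_pl = tf.placeholder(tf.bool, shape=())

logits = get_model(input_pl, is_training_pl)   # B x N x 16 per-point logits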
Example #16
def get_model(point_cloud, is_training, bn_decay, k=20):
    """ Classification DGCNN, input is BxNx3, output Bx40 """

    # EdgeConv functions (MLP implemented as conv2d)
    adj_matrix = tf_util.pairwise_distance(point_cloud)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(point_cloud, nn_idx=nn_idx, k=k)

    net = tf_util.conv2d(edge_feature, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='dgcnn1', bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keepdims=True)
    net1 = net

    adj_matrix = tf_util.pairwise_distance(net)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)

    net = tf_util.conv2d(edge_feature, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='dgcnn2', bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keepdims=True)
    net2 = net
  
    adj_matrix = tf_util.pairwise_distance(net)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)  

    net = tf_util.conv2d(edge_feature, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='dgcnn3', bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keepdims=True)
    net3 = net

    adj_matrix = tf_util.pairwise_distance(net)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)  
    
    net = tf_util.conv2d(edge_feature, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='dgcnn4', bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keepdims=True)
    net4 = net

    net = tf_util.conv2d(tf.concat([net1, net2, net3, net4], axis=-1), 1024, [1, 1], 
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='agg', bn_decay=bn_decay)
    net = tf.squeeze(net, -2)

    # Symmetric function: max pooling
    net = tf.reduce_max(net, axis=1, name='maxpool')

    # MLP on global point cloud vector
    net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training,
                                  scope='fc1', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp1')
    net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training,
                                  scope='fc2', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp2')
    net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')

    return net
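# Hedged sketch of a training objective for the classifier above: standard softmax
# cross-entropy on the Bx40 logits plus an Adam step. Placeholder shapes and the
# learning rate are illustrative, not taken from the original repository.
import tensorflow as tf

points_pl = tf.placeholder(tf.float32, shape=(32, 1024, 3))
labels_pl = tf.placeholder(tf.int32, shape=(32,))
is_training_pl = tf.placeholder(tf.bool, shape=())

logits = get_model(points_pl, is_training_pl, bn_decay=None)          # B x 40
loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels_pl, logits=logits))
train_op = tf.train.AdamOptimizer(learning_rate=1e-3).minimize(loss)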
Example #17
def get_model_my_model(point_cloud, is_training, bn_decay=None):
    
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    k = 20
    
    adj_matrix = tf_util.pairwise_distance(point_cloud)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(point_cloud, nn_idx=nn_idx, k=k)
    
    with tf.variable_scope('transform_net1') as sc:
        transform = input_transform_net(edge_feature, is_training, bn_decay, K=3)
    
    point_cloud_transformed = tf.matmul(point_cloud, transform)
    
    # Conv 1
    adj_matrix = tf_util.pairwise_distance(point_cloud_transformed)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(point_cloud_transformed, nn_idx=nn_idx, k=k)
    
    net_local1 = tf_util.conv2d(edge_feature, 64, [1,1],
                     padding='VALID', stride=[1,1],
                     bn=True, is_training=is_training,
                     scope='dgcnn1', bn_decay=bn_decay)
    net_local1 = tf.reduce_max(net_local1, axis=-2, keep_dims=True)
    
    net_local1_intermediate = net_local1
    
    # Use a distinct scope here so the weights do not collide with 'dgcnn1' above.
    net_local1 = tf_util.conv2d(net_local1, 64, [1,1],
                     padding='VALID', stride=[1,1],
                     bn=True, is_training=is_training,
                     scope='dgcnn1_res', bn_decay=bn_decay, activation_fn=None)
    net_local1 = tf.reduce_max(net_local1, axis=-2, keep_dims=True)
    
    net_local1 += net_local1_intermediate
    net_local1 = tf.nn.relu(net_local1)
    
    #net1 = net_local1
    
    net_local_vector1 = tf_util.max_pool2d(net_local1, [num_point,1],
                         padding='VALID', scope='maxpool1')
    
    input_image = tf.expand_dims(point_cloud_transformed, -1)

    net_global1 = tf_util.conv2d(input_image, 64, [1,3],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv1', bn_decay=bn_decay)
    
    net_global_vector1 = tf_util.max_pool2d(net_global1, [num_point,1],
                           padding='VALID', scope='maxpool1')
    
    points_feat1_concat = tf.concat(axis=-1, values=[net_global_vector1, net_local_vector1])
    points_feat1_concat = tf.reduce_max(points_feat1_concat, axis=-2, keep_dims=True)
    
    # Conv 2
    adj_matrix = tf_util.pairwise_distance(points_feat1_concat)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(points_feat1_concat, nn_idx=nn_idx, k=k)
    
    net_local2 = tf_util.conv2d(edge_feature, 64, [1,1], padding='VALID', stride=[1,1],
                     bn=True, is_training=is_training, scope='dgcnn2', bn_decay=bn_decay)
    net_local2 = tf.reduce_max(net_local2, axis=-2, keep_dims=True)
    #net2 = net_local2
    
    net_local_vector2 = tf_util.max_pool2d(net_local2, [num_point,1],
                         padding='VALID', scope='maxpool2')
    

    net_global2 = tf_util.conv2d(points_feat1_concat, 64, [1,3],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv2', bn_decay=bn_decay)
    
    net_global_vector2 = tf_util.max_pool2d(net_global2, [num_point,1],
                           padding='VALID', scope='maxpool2')
    
    points_feat2_concat = tf.concat(axis=-1, values=[net_global_vector2, net_local_vector2])
    
    # Conv 3
    adj_matrix = tf_util.pairwise_distance(points_feat2_concat)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(points_feat2_concat, nn_idx=nn_idx, k=k)
    
    net_local3 = tf_util.conv2d(edge_feature, 64, [1,1],
                     padding='VALID', stride=[1,1],
                     bn=True, is_training=is_training,
                     scope='dgcnn3', bn_decay=bn_decay)
    net_local3 = tf.reduce_max(net_local3, axis=-2, keep_dims=True)
    #net3 = net_local3
    
    net_local_vector3 = tf_util.max_pool2d(net_local3, [num_point,1],
                         padding='VALID', scope='maxpool3')
    

    net_global3 = tf_util.conv2d(points_feat2_concat, 64, [1,3],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv3', bn_decay=bn_decay)
    
    net_global_vector3 = tf_util.max_pool2d(net_global3, [num_point,1],
                           padding='VALID', scope='maxpool3')
    
    points_feat3_concat = tf.concat(axis=-1, values=[net_global_vector3, net_local_vector3])
    
    # Conv 4
    adj_matrix = tf_util.pairwise_distance(points_feat3_concat)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(points_feat3_concat, nn_idx=nn_idx, k=k)
    
    net_local4 = tf_util.conv2d(edge_feature, 128, [1,1],
                     padding='VALID', stride=[1,1],
                     bn=True, is_training=is_training,
                     scope='dgcnn4', bn_decay=bn_decay)
    net_local4 = tf.reduce_max(net_local4, axis=-2, keep_dims=True)
    #net4 = net_local4
    
    net_local_vector4 = tf_util.max_pool2d(net_local4, [num_point,1],
                         padding='VALID', scope='maxpool4')
    

    net_global4 = tf_util.conv2d(points_feat3_concat, 128, [1,3],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv4', bn_decay=bn_decay)
    
    net_global_vector4 = tf_util.max_pool2d(net_global4, [num_point,1],
                           padding='VALID', scope='maxpool4')
    
    points_feat4_concat = tf.concat(axis=-1, values=[net_global_vector4, net_local_vector4])
    
    # Conv 5
    net_concat = tf_util.conv2d(tf.concat([points_feat1_concat, points_feat2_concat, points_feat3_concat, points_feat4_concat], axis=-1), 1024, [1,1],
                     padding='VALID', stride=[1,1],
                     bn=True, is_training=is_training,
                     scope='conv5', bn_decay=bn_decay)
    
    # Symmetry Aggregation
    net_agg = tf_util.max_pool2d(net_concat, [num_point,1],
                         padding='VALID', scope='maxpool_agg')
    
    net = tf.reshape(net_agg, [batch_size, -1])
    #net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training,
    #                              scope='fc1', bn_decay=bn_decay)
    #net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training,
    #                      scope='dp1')
    #net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training,
    #                              scope='fc2', bn_decay=bn_decay)
    #net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training,
    #                      scope='dp2')
    net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')
    
    return net, end_points
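# Hedged sketch: the residual pattern used in the first local block above (a shared
# 1x1 conv, max over the k neighbors, a second 1x1 conv without activation, then a
# skip connection and ReLU), factored into a helper. Name and scopes are illustrative.
import tensorflow as tf

def residual_edgeconv_block_sketch(edge_feature, channels, is_training, bn_decay, scope):
    # First shared MLP over edge features, pooled over the k neighbors.
    net = tf_util.conv2d(edge_feature, channels, [1, 1],
                         padding='VALID', stride=[1, 1],
                         bn=True, is_training=is_training,
                         scope=scope + '_a', bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    shortcut = net
    # Second shared MLP without activation, then the residual add and ReLU.
    net = tf_util.conv2d(net, channels, [1, 1],
                         padding='VALID', stride=[1, 1],
                         bn=True, is_training=is_training,
                         scope=scope + '_b', bn_decay=bn_decay, activation_fn=None)
    return tf.nn.relu(net + shortcut)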
Example #18
def get_model_dgcnn(point_cloud, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    k = 20

    adj_matrix = tf_util.pairwise_distance(point_cloud)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(point_cloud, nn_idx=nn_idx, k=k)

    # with tf.variable_scope('transform_net1') as sc:
    #     transform = input_transform_net(edge_feature, is_training, bn_decay, K=3)
    #
    # point_cloud_transformed = tf.matmul(point_cloud, transform)
    # adj_matrix = tf_util.pairwise_distance(point_cloud_transformed)
    # nn_idx = tf_util.knn(adj_matrix, k=k)
    # edge_feature = tf_util.get_edge_feature(point_cloud_transformed, nn_idx=nn_idx, k=k)

    net = tf_util.conv2d(edge_feature,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='dgcnn1',
                         bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net1 = net

    adj_matrix = tf_util.pairwise_distance(net)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)

    net = tf_util.conv2d(edge_feature,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='dgcnn2',
                         bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net2 = net

    adj_matrix = tf_util.pairwise_distance(net)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)

    net = tf_util.conv2d(edge_feature,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='dgcnn3',
                         bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net3 = net

    adj_matrix = tf_util.pairwise_distance(net)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)

    net = tf_util.conv2d(edge_feature,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='dgcnn4',
                         bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net4 = net

    net = tf_util.conv2d(tf.concat([net1, net2, net3, net4], axis=-1),
                         1024, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='agg',
                         bn_decay=bn_decay)

    net = tf.reduce_max(net, axis=1, keep_dims=True)

    # MLP on global point cloud vector
    net = tf.reshape(net, [batch_size, -1])
    net = tf_util.fully_connected(net,
                                  512,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc1',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.5,
                          is_training=is_training,
                          scope='dp1')
    net = tf_util.fully_connected(net,
                                  256,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc2',
                                  bn_decay=bn_decay)

    with tf.variable_scope('transform_XYZ') as sc:
        # assert(K==3)
        weights = tf.get_variable('weights', [256, 4],
                                  initializer=tf.constant_initializer(0.0),
                                  dtype=tf.float32)
        biases = tf.get_variable('biases', [4],
                                 initializer=tf.constant_initializer(0.0),
                                 dtype=tf.float32)
        biases += tf.constant([0, 0, 0, 1], dtype=tf.float32)
        transform = tf.matmul(net, weights)
        transform = tf.nn.bias_add(transform, biases)

    transform = tf.reshape(transform, [batch_size, 4])
    transform = tf.nn.l2_normalize(transform, dim=1)

    #net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp2')
    #net = tf_util.fully_connected(net, 4, activation_fn=None, scope='fc3')

    return transform
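# Hedged note: the Bx4 output above is L2-normalized and its bias is initialized to
# [0, 0, 0, 1], which looks like a unit quaternion starting at the identity rotation.
# Assuming an (x, y, z, w) layout, this is one way such a transform could be turned
# into a Bx3x3 rotation matrix; the helper name is illustrative.
import tensorflow as tf

def quaternion_to_rotation_matrix(q):
    """q: B x 4 unit quaternions (x, y, z, w) -> B x 3 x 3 rotation matrices."""
    x, y, z, w = tf.unstack(q, axis=-1)
    row0 = tf.stack([1 - 2 * (y * y + z * z), 2 * (x * y - z * w), 2 * (x * z + y * w)], axis=-1)
    row1 = tf.stack([2 * (x * y + z * w), 1 - 2 * (x * x + z * z), 2 * (y * z - x * w)], axis=-1)
    row2 = tf.stack([2 * (x * z - y * w), 2 * (y * z + x * w), 1 - 2 * (x * x + y * y)], axis=-1)
    return tf.stack([row0, row1, row2], axis=-2)

# Applied like the transform nets above, e.g.:
# point_cloud_transformed = tf.matmul(point_cloud, quaternion_to_rotation_matrix(transform))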
Example #19
def get_model_dg(point_cloud, is_training, bn_decay=None, K=4):
    """ Input (XYZ) Transform Net, input is BxNx3 gray image
        Return:
            Transformation matrix of size 3xK """

    k = 20

    adj_matrix = tf_util.pairwise_distance(point_cloud)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(point_cloud, nn_idx=nn_idx, k=k)

    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value

    #input_image = tf.expand_dims(point_cloud, -1)
    net = tf_util.conv2d(edge_feature,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='tconv1',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='tconv2',
                         bn_decay=bn_decay)

    net = tf.reduce_max(net, axis=-2, keep_dims=True)

    net = tf_util.conv2d(net,
                         1024, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='tconv3',
                         bn_decay=bn_decay)
    net = tf_util.max_pool2d(net, [num_point, 1],
                             padding='VALID',
                             scope='tmaxpool')

    net = tf.reshape(net, [batch_size, -1])
    net = tf_util.fully_connected(net,
                                  512,
                                  bn=True,
                                  is_training=is_training,
                                  scope='tfc1',
                                  bn_decay=bn_decay)
    net = tf_util.fully_connected(net,
                                  256,
                                  bn=True,
                                  is_training=is_training,
                                  scope='tfc2',
                                  bn_decay=bn_decay)

    with tf.variable_scope('transform_XYZ') as sc:
        # assert(K==3)
        weights = tf.get_variable('weights', [256, 4],
                                  initializer=tf.constant_initializer(0.0),
                                  dtype=tf.float32)
        biases = tf.get_variable('biases', [4],
                                 initializer=tf.constant_initializer(0.0),
                                 dtype=tf.float32)
        biases += tf.constant([0, 0, 0, 1], dtype=tf.float32)
        transform = tf.matmul(net, weights)
        transform = tf.nn.bias_add(transform, biases)

    transform = tf.reshape(transform, [batch_size, 4])
    return transform
Example #20
def get_model(point_cloud, input_label, is_training, cat_num, part_num, \
    batch_size, num_point, weight_decay, bn_decay=None):

  batch_size = point_cloud.get_shape()[0].value
  num_point = point_cloud.get_shape()[1].value
  input_image = tf.expand_dims(point_cloud, -1)
  end_points = {}

  k = 25

  adj = tf_util.pairwise_distance(point_cloud)
  nn_idx = tf_util.knn(adj, k=k)
  edge_feature = tf_util.get_edge_feature(input_image, nn_idx=nn_idx, k=k)

  with tf.variable_scope('transform_net1') as sc:
    transform = input_transform_net(edge_feature, is_training, bn_decay, K=3, is_dist=True)
  point_cloud_transformed = tf.matmul(point_cloud, transform)
  input_image = tf.expand_dims(point_cloud_transformed, -1)
  adj = tf_util.pairwise_distance(point_cloud_transformed)
  nn_idx = tf_util.knn(adj, k=k)
  edge_feature = tf_util.get_edge_feature(input_image, nn_idx=nn_idx, k=k)


  out1 = tf_util.conv2d(edge_feature, 64, [1,1],
                       padding='VALID', stride=[1,1],
                       bn=True, is_training=is_training, weight_decay=weight_decay,
                       scope='adj_conv1', bn_decay=bn_decay, is_dist=True)
  
  out2 = tf_util.conv2d(out1, 64, [1,1],
                       padding='VALID', stride=[1,1],
                       bn=True, is_training=is_training, weight_decay=weight_decay,
                       scope='adj_conv2', bn_decay=bn_decay, is_dist=True)

  net_max_1 = tf.reduce_max(out2, axis=-2, keep_dims=True)
  net_mean_1 = tf.reduce_mean(out2, axis=-2, keep_dims=True)

  out3 = tf_util.conv2d(tf.concat([net_max_1, net_mean_1], axis=-1), 64, [1,1],
                       padding='VALID', stride=[1,1],
                       bn=True, is_training=is_training, weight_decay=weight_decay,
                       scope='adj_conv3', bn_decay=bn_decay, is_dist=True)

  adj = tf_util.pairwise_distance(tf.squeeze(out3, axis=-2))
  nn_idx = tf_util.knn(adj, k=k)
  edge_feature = tf_util.get_edge_feature(out3, nn_idx=nn_idx, k=k)

  out4 = tf_util.conv2d(edge_feature, 64, [1,1],
                       padding='VALID', stride=[1,1],
                       bn=True, is_training=is_training, weight_decay=weight_decay,
                       scope='adj_conv4', bn_decay=bn_decay, is_dist=True)
  
  net_max_2 = tf.reduce_max(out4, axis=-2, keep_dims=True)
  net_mean_2 = tf.reduce_mean(out4, axis=-2, keep_dims=True)

  out5 = tf_util.conv2d(tf.concat([net_max_2, net_mean_2], axis=-1), 64, [1,1],
                       padding='VALID', stride=[1,1],
                       bn=True, is_training=is_training, weight_decay=weight_decay,
                       scope='adj_conv5', bn_decay=bn_decay, is_dist=True)

  adj = tf_util.pairwise_distance(tf.squeeze(out5, axis=-2))
  nn_idx = tf_util.knn(adj, k=k)
  edge_feature = tf_util.get_edge_feature(out5, nn_idx=nn_idx, k=k)

  out6 = tf_util.conv2d(edge_feature, 64, [1,1],
                       padding='VALID', stride=[1,1],
                       bn=True, is_training=is_training, weight_decay=weight_decay,
                       scope='adj_conv6', bn_decay=bn_decay, is_dist=True)

  net_max_3 = tf.reduce_max(out6, axis=-2, keep_dims=True)
  net_mean_3 = tf.reduce_mean(out6, axis=-2, keep_dims=True)
    

  out7 = tf_util.conv2d(tf.concat([out3, out5, out6], axis=-1), 1024, [1, 1], 
                       padding='VALID', stride=[1,1],
                       bn=True, is_training=is_training,
                       scope='adj_conv13', bn_decay=bn_decay, is_dist=True)

  out_max = tf_util.max_pool2d(out7, [num_point, 1], padding='VALID', scope='maxpool')

  one_hot_label_expand = tf.reshape(input_label, [batch_size, 1, 1, cat_num])
  one_hot_label_expand = tf_util.conv2d(one_hot_label_expand, 64, [1, 1], 
                       padding='VALID', stride=[1,1],
                       bn=True, is_training=is_training,
                       scope='one_hot_label_expand', bn_decay=bn_decay, is_dist=True)
  out_max = tf.concat(axis=3, values=[out_max, one_hot_label_expand])
  expand = tf.tile(out_max, [1, num_point, 1, 1])

  concat = tf.concat(axis=3, values=[expand, 
                                     net_max_1,
                                     net_mean_1,
                                     out3,
                                     net_max_2,
                                     net_mean_2,
                                     out5,
                                     net_max_3,
                                     net_mean_3,
                                     out6,
                                     out7])

  net2 = tf_util.conv2d(concat, 256, [1,1], padding='VALID', stride=[1,1], bn_decay=bn_decay,
            bn=True, is_training=is_training, scope='seg/conv1', weight_decay=weight_decay, is_dist=True)

  net2 = tf_util.dropout(net2, keep_prob=0.6, is_training=is_training, scope='seg/dp1')

  net2 = tf_util.conv2d(net2, 256, [1,1], padding='VALID', stride=[1,1], bn_decay=bn_decay,
            bn=True, is_training=is_training, scope='seg/conv2', weight_decay=weight_decay, is_dist=True)

  net2 = tf_util.dropout(net2, keep_prob=0.6, is_training=is_training, scope='seg/dp2')

  net2 = tf_util.conv2d(net2, 128, [1,1], padding='VALID', stride=[1,1], bn_decay=bn_decay,
            bn=True, is_training=is_training, scope='seg/conv3', weight_decay=weight_decay, is_dist=True)

  net2 = tf_util.conv2d(net2, 128, [1,1], padding='VALID', stride=[1,1], bn_decay=bn_decay,
            bn=True, is_training=is_training, scope='seg/conv4', weight_decay=weight_decay, is_dist=True)

  net2 = tf_util.conv2d(net2, part_num, [1,1], padding='VALID', stride=[1,1], activation_fn=None, 
            bn=False, scope='seg/conv5', weight_decay=weight_decay, is_dist=True)

  net2 = tf.reshape(net2, [batch_size, num_point, part_num])

  return net2

def get_loss(seg_pred, seg):
  per_instance_seg_loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=seg_pred, labels=seg), axis=1)
  seg_loss = tf.reduce_mean(per_instance_seg_loss)
  per_instance_seg_pred_res = tf.argmax(seg_pred, 2)
  
  return seg_loss, per_instance_seg_loss, per_instance_seg_pred_res
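# Hedged companion to get_loss above: a per-point accuracy op built from the same
# argmax predictions, included here for monitoring only (not in the original snippet).
def get_accuracy(seg_pred, seg):
  # seg_pred: B x N x part_num logits, seg: B x N integer labels
  pred_labels = tf.argmax(seg_pred, 2)
  correct = tf.equal(pred_labels, tf.cast(seg, tf.int64))
  return tf.reduce_mean(tf.cast(correct, tf.float32))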
Example #21
def get_model(point_cloud, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    k = 20

    adj_matrix = tf_util.pairwise_distance(point_cloud)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(point_cloud, nn_idx=nn_idx, k=k)

    with tf.variable_scope('transform_net1') as sc:
        transform = input_transform_net(edge_feature,
                                        is_training,
                                        bn_decay,
                                        K=3)

    point_cloud_transformed = tf.matmul(point_cloud, transform)
    input_image = tf.expand_dims(point_cloud_transformed, -1)

    adj_matrix = tf_util.pairwise_distance(point_cloud_transformed)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(point_cloud_transformed,
                                            nn_idx=nn_idx,
                                            k=k)

    # addition of transform layers
    net = tf_util.conv2d(input_image,
                         64, [1, 3],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv1',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv2',
                         bn_decay=bn_decay)

    with tf.variable_scope('transform_net2') as sc:
        transform = feature_transform_net(net, is_training, bn_decay, K=64)
        end_points['transform'] = transform
        net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), transform)
        net_transformed = tf.expand_dims(net_transformed, [2])

    net = tf_util.conv2d(edge_feature,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='dftnet1',
                         bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net1 = net

    adj_matrix = tf_util.pairwise_distance(net)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)

    net = tf_util.conv2d(edge_feature,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='dftnet2',
                         bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net2 = net

    adj_matrix = tf_util.pairwise_distance(net)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)

    net = tf_util.conv2d(edge_feature,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='dftnet3',
                         bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net3 = net

    adj_matrix = tf_util.pairwise_distance(net)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)

    net = tf_util.conv2d(edge_feature,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='dftnet4',
                         bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net4 = net

    adj_matrix = tf_util.pairwise_distance(net)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)

    net = tf_util.conv2d(edge_feature,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='dftnet5',
                         bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net5 = net

    adj_matrix = tf_util.pairwise_distance(net)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)

    net = tf_util.conv2d(edge_feature,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='dftnet6',
                         bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net6 = net

    adj_matrix = tf_util.pairwise_distance(net)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)

    net = tf_util.conv2d(edge_feature,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='dftnet6_2',  # distinct scope; 'dftnet6' is already used above
                         bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net7 = net

    adj_matrix = tf_util.pairwise_distance(net)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)

    net = tf_util.conv2d(edge_feature,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='dftnet7',
                         bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net8 = net

    net = tf_util.conv2d(tf.concat(
        [net1, net2, net3, net4, net5, net6, net7, net8], axis=-1),
                         1024, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='agg',
                         bn_decay=bn_decay)

    net = tf.reduce_max(net, axis=1, keep_dims=True)

    net = tf.reshape(net, [batch_size, -1])
    net = tf_util.fully_connected(net,
                                  512,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc1',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.5,
                          is_training=is_training,
                          scope='dp1')

    net = tf_util.fully_connected(net,
                                  256,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc2',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.5,
                          is_training=is_training,
                          scope='dp2')

    net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')

    return net, end_points
Example #22
def get_model(point_cloud, input_label, is_training, cat_num, part_num, \
        batch_size, num_point, weight_decay, graphnum, featnum,  bn_decay=None):
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    input_image = tf.expand_dims(point_cloud, -1)

    k = 30

    adj = tf_util.pairwise_distance(point_cloud)
    nn_idx = tf_util.knn(adj, k=k)
    edge_feature = tf_util.get_edge_feature(input_image, nn_idx=nn_idx, k=k)

    with tf.variable_scope('transform_net1') as sc:
        transform = input_transform_net(edge_feature,
                                        is_training,
                                        bn_decay,
                                        K=3,
                                        is_dist=True)
    point_cloud_transformed = tf.matmul(point_cloud, transform)
    input_image = tf.expand_dims(point_cloud_transformed, -2)
    adj = tf_util.pairwise_distance(point_cloud_transformed)
    nn_idx = tf_util.knn(adj, k=k)
    edge_feature = tf_util.get_edge_feature(input_image, nn_idx=nn_idx, k=k)

    out1 = tf_util.conv2d(edge_feature,
                          64, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn=True,
                          is_training=is_training,
                          weight_decay=weight_decay,
                          scope='adj_conv1',
                          bn_decay=bn_decay,
                          is_dist=True)

    out2 = tf_util.conv2d(out1,
                          64, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn=True,
                          is_training=is_training,
                          weight_decay=weight_decay,
                          scope='adj_conv2',
                          bn_decay=bn_decay,
                          is_dist=True)

    net_max_1 = tf.reduce_max(out2, axis=-2, keep_dims=True)
    net_mean_1 = tf.reduce_mean(out2, axis=-2, keep_dims=True)

    out3 = tf_util.conv2d(tf.concat([net_max_1, net_mean_1], axis=-1),
                          64, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn=True,
                          is_training=is_training,
                          weight_decay=weight_decay,
                          scope='adj_conv3',
                          bn_decay=bn_decay,
                          is_dist=True)

    out3A, net_max_1A, net_mean_1A = tf_util.offset_deform(
        input_image,
        out3,
        scope="trans_conv0",
        num_neighbor=k,
        num_graph=graphnum[0],
        num_feat=featnum[0],
        weight_decay=weight_decay,
        is_training=is_training,
        bn_decay=bn_decay)

    out5, net_max_2, net_mean_2 = tf_util.offset_deform(
        input_image,
        out3A,
        scope="trans_conv1",
        num_neighbor=k,
        num_graph=graphnum[0],
        num_feat=featnum[0],
        weight_decay=weight_decay,
        is_training=is_training,
        bn_decay=bn_decay)

    out7, net_max_3, net_mean_3 = tf_util.offset_deform(
        input_image,
        out5,
        scope="trans_conv2",
        num_neighbor=k,
        num_graph=graphnum[1],
        num_feat=featnum[1],
        weight_decay=weight_decay,
        is_training=is_training,
        bn_decay=bn_decay)
    '''adj = tf_util.pairwise_distance(tf.squeeze(trans2, axis=-2))
    nn_idx = tf_util.knn(adj, k=k)
    edge_feature = tf_util.get_edge_feature(tf.concat([out5,trans2], axis = -1), nn_idx=nn_idx, k=k)

    out6 = tf_util.conv2d(edge_feature, 64, [1,1],
                                             padding='VALID', stride=[1,1],
                                             bn=True, is_training=is_training, weight_decay=weight_decay,
                                             scope='adj_conv6', bn_decay=bn_decay, is_dist=True)

    net_max_3 = tf.reduce_max(out6, axis=-2, keep_dims=True)
    net_mean_3 = tf.reduce_mean(out6, axis=-2, keep_dims=True)

    out7 = tf_util.conv2d(tf.concat([net_max_3, net_mean_3], axis=-1), 64, [1,1],
                                             padding='VALID', stride=[1,1],
                                             bn=True, is_training=is_training, weight_decay=weight_decay,
                                             scope='adj_conv7', bn_decay=bn_decay, is_dist=True)'''

    out8 = tf_util.conv2d(tf.concat([out3, out5, out7], axis=-1),
                          1024, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn=True,
                          is_training=is_training,
                          scope='adj_conv13',
                          bn_decay=bn_decay,
                          is_dist=True)

    out_max = tf_util.max_pool2d(out8, [num_point, 1],
                                 padding='VALID',
                                 scope='maxpool')

    one_hot_label_expand = tf.reshape(input_label, [batch_size, 1, 1, cat_num])
    one_hot_label_expand = tf_util.conv2d(one_hot_label_expand,
                                          128, [1, 1],
                                          padding='VALID',
                                          stride=[1, 1],
                                          bn=True,
                                          is_training=is_training,
                                          scope='one_hot_label_expand',
                                          bn_decay=bn_decay,
                                          is_dist=True)
    out_max = tf.concat(axis=3, values=[out_max, one_hot_label_expand])
    expand = tf.tile(out_max, [1, num_point, 1, 1])

    concat = tf.concat(axis=3,
                       values=[
                           expand, net_max_1, net_mean_1, out3, net_max_2,
                           net_mean_2, out5, net_max_3, net_mean_3, out7, out8
                       ])

    net2 = tf_util.conv2d(concat,
                          256, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn_decay=bn_decay,
                          bn=True,
                          is_training=is_training,
                          scope='seg/conv1',
                          weight_decay=weight_decay,
                          is_dist=True)
    net2 = tf_util.dropout(net2,
                           keep_prob=0.6,
                           is_training=is_training,
                           scope='seg/dp1')
    net2 = tf_util.conv2d(net2,
                          256, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn_decay=bn_decay,
                          bn=True,
                          is_training=is_training,
                          scope='seg/conv2',
                          weight_decay=weight_decay,
                          is_dist=True)
    net2 = tf_util.dropout(net2,
                           keep_prob=0.6,
                           is_training=is_training,
                           scope='seg/dp2')
    net2 = tf_util.conv2d(net2,
                          128, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn_decay=bn_decay,
                          bn=True,
                          is_training=is_training,
                          scope='seg/conv3',
                          weight_decay=weight_decay,
                          is_dist=True)
    net2 = tf_util.conv2d(net2,
                          part_num, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          activation_fn=None,
                          bn=False,
                          scope='seg/conv4',
                          weight_decay=weight_decay,
                          is_dist=True)

    net2 = tf.reshape(net2, [batch_size, num_point, part_num])

    return net2
Example #23
def calc_ldgcnn_feature(point_cloud, is_training, bn_decay=None):
    # B: batch size; N: number of points, C: channels; k: number of nearest neighbors
    # point_cloud: B*N*3
    k = 20

    # adj_matrix: B*N*N
    adj_matrix = tf_util.pairwise_distance(point_cloud)
    # Find the indices of the k nearest neighbors.
    nn_idx = tf_util.knn(adj_matrix, k=k)

    point_cloud = tf.expand_dims(point_cloud, axis=-2)

    # edge_feature: B*N*k*6
    # The vector in the last dimension is (Xc, Yc, Zc, Xck - Xc, Yck - Yc, Zck - Zc):
    # (Xc, Yc, Zc) is the central point and (Xck - Xc, Yck - Yc, Zck - Zc) is the edge vector.
    edge_feature = tf_util.get_edge_feature(point_cloud, nn_idx=nn_idx, k=k)

    # net: B*N*k*64
    # The kernel size of the CNN is 1*1, so this is effectively an MLP with shared parameters.
    net = tf_util.conv2d(edge_feature,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='dgcnn1',
                         bn_decay=bn_decay)

    # net: B*N*1*64
    # Keep the channel-wise maximum over the k edge features.
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net1 = net

    # adj_matrix: B*N*N
    adj_matrix = tf_util.pairwise_distance(net)
    nn_idx = tf_util.knn(adj_matrix, k=k)

    # net: B*N*1*67
    # Concatenate the hierarchical features (skip connection back to the raw points).
    net = tf.concat([point_cloud, net1], axis=-1)

    # edge_feature: B*N*k*134
    edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)

    # net: B*N*k*64
    net = tf_util.conv2d(edge_feature,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='dgcnn2',
                         bn_decay=bn_decay)
    # net: B*N*1*64
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net2 = net

    adj_matrix = tf_util.pairwise_distance(net)
    nn_idx = tf_util.knn(adj_matrix, k=k)

    # net: B*N*1*131
    net = tf.concat([point_cloud, net1, net2], axis=-1)

    # edge_feature: B*N*k*262
    edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)

    # net: B*N*k*64
    net = tf_util.conv2d(edge_feature,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='dgcnn3',
                         bn_decay=bn_decay)
    # net: B*N*1*64
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net3 = net

    adj_matrix = tf_util.pairwise_distance(net)
    nn_idx = tf_util.knn(adj_matrix, k=k)

    # net: B*N*1*195
    net = tf.concat([point_cloud, net1, net2, net3], axis=-1)
    # edge_feature: B*N*k*390
    edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)

    # net: B*N*k*128
    net = tf_util.conv2d(edge_feature,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='dgcnn4',
                         bn_decay=bn_decay)
    # net: B*N*1*128
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net4 = net

    # input: B*N*1*323
    # net: B*N*1*1024
    net = tf_util.conv2d(tf.concat([point_cloud, net1, net2, net3, net4],
                                   axis=-1),
                         1024, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='agg',
                         bn_decay=bn_decay)
    # net: B*1*1*1024
    net = tf.reduce_max(net, axis=1, keep_dims=True)
    # net: B*1024
    net = tf.squeeze(net)
    return net
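# Hedged sketch: a classification head on top of the B x 1024 feature returned by
# calc_ldgcnn_feature, mirroring the fc1/fc2/fc3 + dropout pattern used by the other
# classifiers in this file. Function name and layer sizes are illustrative.
def get_ldgcnn_model_sketch(point_cloud, is_training, bn_decay=None, num_classes=40):
    feat = calc_ldgcnn_feature(point_cloud, is_training, bn_decay)    # B x 1024
    net = tf_util.fully_connected(feat, 512, bn=True, is_training=is_training,
                                  scope='fc1', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp1')
    net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training,
                                  scope='fc2', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp2')
    return tf_util.fully_connected(net, num_classes, activation_fn=None, scope='fc3')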
Example #24
def get_model(point_cloud, input_label, is_training, cat_num, part_num, \
    batch_size, num_point, weight_decay, bn_decay=None):

    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    input_image = tf.expand_dims(point_cloud, -1)

    k = 20

    adj = tf_util.pairwise_distance(point_cloud)
    nn_idx = tf_util.knn(adj, k=k)
    edge_feature = tf_util.get_edge_feature(input_image, nn_idx=nn_idx, k=k)

    with tf.variable_scope('transform_net1') as sc:
        transform = input_transform_net(edge_feature,
                                        is_training,
                                        bn_decay,
                                        K=3,
                                        is_dist=True)
    point_cloud_transformed = tf.matmul(point_cloud, transform)

    input_image = tf.expand_dims(point_cloud_transformed, -1)
    adj = tf_util.pairwise_distance(point_cloud_transformed)
    nn_idx = tf_util.knn(adj, k=k)
    edge_feature = tf_util.get_edge_feature(input_image, nn_idx=nn_idx, k=k)

    out1 = tf_util.conv2d(edge_feature,
                          64, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn=True,
                          is_training=is_training,
                          weight_decay=weight_decay,
                          scope='adj_conv1',
                          bn_decay=bn_decay,
                          is_dist=True)

    out2 = tf_util.conv2d(out1,
                          64, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn=True,
                          is_training=is_training,
                          weight_decay=weight_decay,
                          scope='adj_conv2',
                          bn_decay=bn_decay,
                          is_dist=True)

    net_1 = tf.reduce_max(out2, axis=-2, keep_dims=True)

    adj = tf_util.pairwise_distance(net_1)
    nn_idx = tf_util.knn(adj, k=k)
    edge_feature = tf_util.get_edge_feature(net_1, nn_idx=nn_idx, k=k)

    out3 = tf_util.conv2d(edge_feature,
                          64, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn=True,
                          is_training=is_training,
                          weight_decay=weight_decay,
                          scope='adj_conv3',
                          bn_decay=bn_decay,
                          is_dist=True)

    out4 = tf_util.conv2d(out3,
                          64, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn=True,
                          is_training=is_training,
                          weight_decay=weight_decay,
                          scope='adj_conv4',
                          bn_decay=bn_decay,
                          is_dist=True)

    net_2 = tf.reduce_max(out4, axis=-2, keep_dims=True)

    adj = tf_util.pairwise_distance(net_2)
    nn_idx = tf_util.knn(adj, k=k)
    edge_feature = tf_util.get_edge_feature(net_2, nn_idx=nn_idx, k=k)

    out5 = tf_util.conv2d(edge_feature,
                          64, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn=True,
                          is_training=is_training,
                          weight_decay=weight_decay,
                          scope='adj_conv5',
                          bn_decay=bn_decay,
                          is_dist=True)

    # out6 = tf_util.conv2d(out5, 64, [1,1],
    #                      padding='VALID', stride=[1,1],
    #                      bn=True, is_training=is_training, weight_decay=weight_decay,
    #                      scope='adj_conv6', bn_decay=bn_decay, is_dist=True)

    net_3 = tf.reduce_max(out5, axis=-2, keep_dims=True)

    out7 = tf_util.conv2d(tf.concat([net_1, net_2, net_3], axis=-1),
                          1024, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn=True,
                          is_training=is_training,
                          scope='adj_conv7',
                          bn_decay=bn_decay,
                          is_dist=True)

    out_max = tf_util.max_pool2d(out7, [num_point, 1],
                                 padding='VALID',
                                 scope='maxpool')

    one_hot_label_expand = tf.reshape(input_label, [batch_size, 1, 1, cat_num])
    one_hot_label_expand = tf_util.conv2d(one_hot_label_expand,
                                          64, [1, 1],
                                          padding='VALID',
                                          stride=[1, 1],
                                          bn=True,
                                          is_training=is_training,
                                          scope='one_hot_label_expand',
                                          bn_decay=bn_decay,
                                          is_dist=True)
    out_max = tf.concat(axis=3, values=[out_max, one_hot_label_expand])
    expand = tf.tile(out_max, [1, num_point, 1, 1])

    concat = tf.concat(axis=3, values=[expand, net_1, net_2, net_3])

    net2 = tf_util.conv2d(concat,
                          256, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn_decay=bn_decay,
                          bn=True,
                          is_training=is_training,
                          scope='seg/conv1',
                          weight_decay=weight_decay,
                          is_dist=True)
    net2 = tf_util.dropout(net2,
                           keep_prob=0.6,
                           is_training=is_training,
                           scope='seg/dp1')
    net2 = tf_util.conv2d(net2,
                          256, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn_decay=bn_decay,
                          bn=True,
                          is_training=is_training,
                          scope='seg/conv2',
                          weight_decay=weight_decay,
                          is_dist=True)
    net2 = tf_util.dropout(net2,
                           keep_prob=0.6,
                           is_training=is_training,
                           scope='seg/dp2')
    net2 = tf_util.conv2d(net2,
                          128, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn_decay=bn_decay,
                          bn=True,
                          is_training=is_training,
                          scope='seg/conv3',
                          weight_decay=weight_decay,
                          is_dist=True)
    net2 = tf_util.conv2d(net2,
                          part_num, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          activation_fn=None,
                          bn=False,
                          scope='seg/conv4',
                          weight_decay=weight_decay,
                          is_dist=True)

    net2 = tf.reshape(net2, [batch_size, num_point, part_num])

    return net2
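# Hedged usage sketch (an assumption, not part of the original example): wiring the
# part-segmentation network above into a TF1 graph. The shapes and the one-hot
# category placeholder are inferred from the reshape of input_label to
# [batch_size, 1, 1, cat_num]; tf_util and input_transform_net must be importable
# exactly as in the snippet.
BATCH, NPOINT, CAT, PART = 8, 2048, 16, 50
with tf.Graph().as_default():
    pts_pl = tf.placeholder(tf.float32, shape=(BATCH, NPOINT, 3))
    label_pl = tf.placeholder(tf.float32, shape=(BATCH, CAT))  # one-hot object category
    is_training_pl = tf.placeholder(tf.bool, shape=())
    seg_logits = get_model(pts_pl, label_pl, is_training_pl,
                           cat_num=CAT, part_num=PART,
                           batch_size=BATCH, num_point=NPOINT,
                           weight_decay=0.0, bn_decay=None)
    print(seg_logits.get_shape())  # expected (BATCH, NPOINT, PART)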
Beispiel #25
def get_model(point_cloud, is_training, bn_decay=None):
  """ ConvNet baseline, input is BxNx9 gray image """
  batch_size = point_cloud.get_shape()[0].value
  num_point = point_cloud.get_shape()[1].value
  input_image = tf.expand_dims(point_cloud, -1)

  k = 30

  adj = tf_util.pairwise_distance(point_cloud[:, :, 6:])
  nn_idx = tf_util.knn(adj, k=k) # (batch, num_points, k)
  edge_feature = tf_util.get_edge_feature(input_image, nn_idx=nn_idx, k=k)

  samp_out1, samp_out2, samp_out3, globle_feat = model_part(edge_feature, is_training, k, bn_decay)

  out1 = tf_util.conv2d(edge_feature, 64, [1,1],
                       padding='VALID', stride=[1,1],
                       bn=True, is_training=is_training,
                       scope='adj_conv1', bn_decay=bn_decay, is_dist=True)

  out2 = tf_util.conv2d(out1, 64, [1,1],
                       padding='VALID', stride=[1,1],
                       bn=True, is_training=is_training,
                       scope='adj_conv2', bn_decay=bn_decay, is_dist=True)

  net_max_1 = tf.reduce_max(out2, axis=-2, keep_dims=True)
  net_mean_1 = tf.reduce_mean(out2, axis=-2, keep_dims=True)

  out3 = tf_util.conv2d(tf.concat([net_max_1, net_mean_1], axis=-1), 64, [1,1],
                       padding='VALID', stride=[1,1],
                       bn=True, is_training=is_training,
                       scope='adj_conv3', bn_decay=bn_decay, is_dist=True)

  out1_expand = tf.tile(tf.reshape(samp_out1, [batch_size, 1, 1, -1]), [1, num_point, 1, 1])
  out1_concat = tf.concat(axis=3, values=[out3, out1_expand])
  print("out1_concat = ", out1_concat.shape)

  out4 = tf_util.conv2d(out1_concat, 64, [1,1],
                       padding='VALID', stride=[1,1],
                       bn=True, is_training=is_training,
                       scope='adj_conv4', bn_decay=bn_decay, is_dist=True)

  adj = tf_util.pairwise_distance(tf.squeeze(out4, axis=-2))
  nn_idx = tf_util.knn(adj, k=k)
  edge_feature = tf_util.get_edge_feature(out4, nn_idx=nn_idx, k=k)

  out5 = tf_util.conv2d(edge_feature, 64, [1,1],
                       padding='VALID', stride=[1,1],
                       bn=True, is_training=is_training,
                       scope='adj_conv5', bn_decay=bn_decay, is_dist=True)

  net_max_2 = tf.reduce_max(out5, axis=-2, keep_dims=True)
  net_mean_2 = tf.reduce_mean(out5, axis=-2, keep_dims=True)

  out6 = tf_util.conv2d(tf.concat([net_max_2, net_mean_2], axis=-1), 64, [1,1],
                       padding='VALID', stride=[1,1],
                       bn=True, is_training=is_training,
                       scope='adj_conv6', bn_decay=bn_decay, is_dist=True)

  out2_expand = tf.tile(tf.reshape(samp_out2, [batch_size, 1, 1, -1]), [1, num_point, 1, 1])
  out2_concat = tf.concat(axis=3, values=[out6, out2_expand])
  out7 = tf_util.conv2d(out2_concat, 64, [1,1],
                       padding='VALID', stride=[1,1],
                       bn=True, is_training=is_training,
                       scope='adj_conv7', bn_decay=bn_decay, is_dist=True)

  adj = tf_util.pairwise_distance(tf.squeeze(out7, axis=-2))
  nn_idx = tf_util.knn(adj, k=k)
  edge_feature = tf_util.get_edge_feature(out7, nn_idx=nn_idx, k=k)

  out8 = tf_util.conv2d(edge_feature, 64, [1,1],
                       padding='VALID', stride=[1,1],
                       bn=True, is_training=is_training,
                       scope='adj_conv8', bn_decay=bn_decay, is_dist=True)

  net_max_3 = tf.reduce_max(out8, axis=-2, keep_dims=True)
  net_mean_3 = tf.reduce_mean(out8, axis=-2, keep_dims=True)

  out9 = tf_util.conv2d(tf.concat([net_max_3, net_mean_3], axis=-1), 64, [1,1],
                       padding='VALID', stride=[1,1],
                       bn=True, is_training=is_training,
                       scope='adj_conv9', bn_decay=bn_decay, is_dist=True)
  out3_expand = tf.tile(tf.reshape(samp_out3, [batch_size, 1, 1, -1]), [1, num_point, 1, 1])
  out3_concat = tf.concat(axis=3, values=[out9, out3_expand])
  out10 = tf_util.conv2d(out3_concat, 64, [1,1],
                       padding='VALID', stride=[1,1],
                       bn=True, is_training=is_training,
                       scope='adj_conv10', bn_decay=bn_decay, is_dist=True)

  out11 = tf_util.conv2d(tf.concat([out4, out7, out10], axis=-1), 1024, [1, 1],
                       padding='VALID', stride=[1,1],
                       bn=True, is_training=is_training,
                       scope='adj_conv11', bn_decay=bn_decay, is_dist=True)

  out_max = tf_util.max_pool2d(out11, [num_point,1], padding='VALID', scope='maxpool')

  expand = tf.tile(out_max, [1, num_point, 1, 1])

  concat = tf.concat(axis=3, values=[expand,
                                     net_max_1,
                                     net_mean_1,
                                     out4,
                                     net_max_2,
                                     net_mean_2,
                                     out7,
                                     net_max_3,
                                     net_mean_3,
                                     out10,
                                     out11])
  print("concat = ", concat.shape)
  # CONCAT
  globle_feat_expand = tf.tile(tf.reshape(globle_feat, [batch_size, 1, 1, -1]), [1, num_point, 1, 1])
  points_feat1_concat = tf.concat(axis=3, values=[concat, globle_feat_expand])
  print("points_feat1_concat = ", points_feat1_concat.shape)

  # CONV
  net = tf_util.conv2d(points_feat1_concat, 512, [1,1], padding='VALID', stride=[1,1],
             bn=True, is_training=is_training, scope='seg/conv1', is_dist=True)
  net = tf_util.conv2d(net, 256, [1,1], padding='VALID', stride=[1,1],
             bn=True, is_training=is_training, scope='seg/conv2', is_dist=True)
  net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training, scope='dp1')
  net = tf_util.conv2d(net, 13, [1,1], padding='VALID', stride=[1,1],
             activation_fn=None, scope='seg/conv3', is_dist=True)
  net = tf.squeeze(net, [2])

  return net
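# Hedged illustration (not from the original code): this variant pools each EdgeConv
# output over the k neighbours with both max and mean and concatenates the two,
# doubling the channel count before the next 1x1 convolution. A toy tensor makes the
# resulting shapes explicit.
x = tf.ones((2, 16, 30, 64))                         # B x N x k edge activations, 64 channels
x_max = tf.reduce_max(x, axis=-2, keep_dims=True)    # B x N x 1 x 64
x_mean = tf.reduce_mean(x, axis=-2, keep_dims=True)  # B x N x 1 x 64
pooled = tf.concat([x_max, x_mean], axis=-1)         # B x N x 1 x 128
print(pooled.get_shape())                            # (2, 16, 1, 128)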
Beispiel #26
def get_model(point_cloud, input_label, is_training, bn_decay, k=20):
    """ Classification DGCNN, input is BxNx3, output BxNx50 """
    cat_num = 16
    part_num = 50
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value

    adj = tf_util.pairwise_distance(point_cloud)
    nn_idx = tf_util.knn(adj, k=k)
    edge_feature = tf_util.get_edge_feature(point_cloud, nn_idx=nn_idx, k=k)

    out1 = tf_util.conv2d(edge_feature,
                          64, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn=True,
                          is_training=is_training,
                          scope='adj_conv1',
                          bn_decay=bn_decay)

    out2 = tf_util.conv2d(out1,
                          64, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn=True,
                          is_training=is_training,
                          scope='adj_conv2',
                          bn_decay=bn_decay)

    net_max_1 = tf.reduce_max(out2, axis=-2, keepdims=True, name='maxpool1')
    net_mean_1 = tf.reduce_mean(out2, axis=-2, keepdims=True, name='meanpool1')

    out3 = tf_util.conv2d(tf.concat([net_max_1, net_mean_1], axis=-1),
                          64, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn=True,
                          is_training=is_training,
                          scope='adj_conv3',
                          bn_decay=bn_decay)

    adj = tf_util.pairwise_distance(tf.squeeze(out3, axis=-2))
    nn_idx = tf_util.knn(adj, k=k)
    edge_feature = tf_util.get_edge_feature(tf.squeeze(out3, axis=-2),
                                            nn_idx=nn_idx,
                                            k=k)

    out4 = tf_util.conv2d(edge_feature,
                          64, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn=True,
                          is_training=is_training,
                          scope='adj_conv4',
                          bn_decay=bn_decay)

    net_max_2 = tf.reduce_max(out4, axis=-2, keepdims=True, name='maxpool2')
    net_mean_2 = tf.reduce_mean(out4, axis=-2, keepdims=True, name='meanpool2')

    out5 = tf_util.conv2d(tf.concat([net_max_2, net_mean_2], axis=-1),
                          64, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn=True,
                          is_training=is_training,
                          scope='adj_conv5',
                          bn_decay=bn_decay)

    adj = tf_util.pairwise_distance(tf.squeeze(out5, axis=-2))
    nn_idx = tf_util.knn(adj, k=k)
    edge_feature = tf_util.get_edge_feature(tf.squeeze(out5, axis=-2),
                                            nn_idx=nn_idx,
                                            k=k)

    out6 = tf_util.conv2d(edge_feature,
                          64, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn=True,
                          is_training=is_training,
                          scope='adj_conv6',
                          bn_decay=bn_decay)

    net_max_3 = tf.reduce_max(out6, axis=-2, keepdims=True, name='maxpool3')
    net_mean_3 = tf.reduce_mean(out6, axis=-2, keepdims=True, name='meanpool3')

    out7 = tf_util.conv2d(tf.concat([net_max_3, net_mean_3], axis=-1),
                          64, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn=True,
                          is_training=is_training,
                          scope='adj_conv7',
                          bn_decay=bn_decay)

    out8 = tf_util.conv2d(tf.concat([out3, out5, out7], axis=-1),
                          1024, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn=True,
                          is_training=is_training,
                          scope='adj_conv8',
                          bn_decay=bn_decay)

    out_max = tf.reduce_max(out8, axis=1, keepdims=True, name='maxpool4')

    one_hot_label = tf.one_hot(input_label, cat_num)
    one_hot_label_expand = tf.reshape(one_hot_label,
                                      [batch_size, 1, 1, cat_num])
    one_hot_label_expand = tf_util.conv2d(one_hot_label_expand,
                                          128, [1, 1],
                                          padding='VALID',
                                          stride=[1, 1],
                                          bn=True,
                                          is_training=is_training,
                                          scope='one_hot_label_expand',
                                          bn_decay=bn_decay)
    out_max = tf.concat(axis=3, values=[out_max, one_hot_label_expand])
    expand = tf.tile(out_max, [1, num_point, 1, 1])

    concat = tf.concat(axis=3,
                       values=[
                           expand, net_max_1, net_mean_1, out3, net_max_2,
                           net_mean_2, out5, net_max_3, net_mean_3, out7, out8
                       ])

    net2 = tf_util.conv2d(concat,
                          256, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn_decay=bn_decay,
                          bn=True,
                          is_training=is_training,
                          scope='seg/conv1')
    net2 = tf_util.dropout(net2,
                           keep_prob=0.6,
                           is_training=is_training,
                           scope='seg/dp1')
    net2 = tf_util.conv2d(net2,
                          256, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn_decay=bn_decay,
                          bn=True,
                          is_training=is_training,
                          scope='seg/conv2')
    net2 = tf_util.dropout(net2,
                           keep_prob=0.6,
                           is_training=is_training,
                           scope='seg/dp2')
    net2 = tf_util.conv2d(net2,
                          128, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn_decay=bn_decay,
                          bn=True,
                          is_training=is_training,
                          scope='seg/conv3')
    net2 = tf_util.conv2d(net2,
                          part_num, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          activation_fn=None,
                          bn=False,
                          scope='seg/conv4')

    net2 = tf.reshape(net2, [batch_size, num_point, part_num])

    return net2
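# Hedged usage sketch (assumption, not in the source): here input_label is an integer
# object-category id per shape, because the network applies tf.one_hot with
# cat_num = 16 internally and then broadcasts the embedded label to every point.
BATCH, NPOINT = 4, 1024
with tf.Graph().as_default():
    pts_pl = tf.placeholder(tf.float32, shape=(BATCH, NPOINT, 3))
    cat_pl = tf.placeholder(tf.int32, shape=(BATCH,))
    is_training_pl = tf.placeholder(tf.bool, shape=())
    seg_logits = get_model(pts_pl, cat_pl, is_training_pl, bn_decay=None, k=20)
    print(seg_logits.get_shape())  # expected (BATCH, NPOINT, 50)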
Beispiel #27
def model(point_cloud, is_training, cut, num_point, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    k = 20

    adj_matrix = tf_util.pairwise_distance(point_cloud)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(point_cloud, nn_idx=nn_idx, k=k)

    with tf.variable_scope('transform_net1') as sc:
        transform = input_transform_net(edge_feature,
                                        is_training,
                                        cut,
                                        bn_decay,
                                        K=3)

    point_cloud_transformed = tf.matmul(point_cloud, transform)
    adj_matrix = tf_util.pairwise_distance(point_cloud_transformed)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(point_cloud_transformed,
                                            nn_idx=nn_idx,
                                            k=k)

    net = tf_util.conv2d(edge_feature,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope=cut + 'dgcnn1',
                         bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net1 = net

    adj_matrix = tf_util.pairwise_distance(net)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)

    net = tf_util.conv2d(edge_feature,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope=cut + 'dgcnn2',
                         bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net2 = net

    adj_matrix = tf_util.pairwise_distance(net)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)

    net = tf_util.conv2d(edge_feature,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope=cut + 'dgcnn3',
                         bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net3 = net

    adj_matrix = tf_util.pairwise_distance(net)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)

    net = tf_util.conv2d(edge_feature,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope=cut + 'dgcnn4',
                         bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net4 = net

    net = tf_util.conv2d(tf.concat([net1, net2, net3, net4], axis=-1),
                         256, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope=cut + 'agg',
                         bn_decay=bn_decay)

    net = tf.reduce_max(net, axis=1, keep_dims=True)

    return net
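# Hedged usage sketch (assumption): `model` returns a per-branch global descriptor of
# shape B x 1 x 1 x 256 rather than class logits, so a caller would typically reshape
# it to B x 256 (and possibly concatenate the descriptors of several `cut` branches)
# before attaching a classifier head.
with tf.Graph().as_default():
    pts_pl = tf.placeholder(tf.float32, shape=(8, 1024, 3))
    is_training_pl = tf.placeholder(tf.bool, shape=())
    feat = model(pts_pl, is_training_pl, cut='branch_a_', num_point=1024, bn_decay=None)
    feat = tf.reshape(feat, [8, -1])  # B x 256 global feature
    print(feat.get_shape())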
Beispiel #28
def point_atrous_conv(feature_input,
                      adj_input,
                      dist_input,
                      knn,
                      atrous,
                      radius_min,
                      radius_max,
                      num_output_channels,
                      scope,
                      kernel_size=[1, 1],
                      stride=[1, 1],
                      padding='VALID',
                      use_xavier=True,
                      stddev=1e-3,
                      weight_decay=0.0,
                      activation_fn=tf.nn.relu,
                      bn=False,
                      bn_decay=None,
                      is_training=None,
                      is_dist=False):
    '''
    Input:
        feature_input: (batch_size, num_points, 1, num_features)
        adj_input: (batch_size, num_points, num_points)
        dist_input: (batch_size, num_points, num_points)
        knn: int32
        atrous: int32
        radius_min: float32
        radius_max: float32
        num_output_channels: int32
        kernel_size: a list of 2 ints
        scope: string
        stride: a list of 2 ints
        padding: 'SAME' or 'VALID'
        use_xavier: bool, use xavier_initializer if true
        stddev: float, stddev for truncated_normal init
        weight_decay: float
        activation_fn: function
        bn: bool, whether to use batch norm
        bn_decay: float or float tensor variable in [0,1]
        is_training: bool Tensor variable
    Returns:
        net: (batch_size, num_points, 1, num_output_channels)
    '''
    feature_shape = feature_input.get_shape()
    # batch_size = feature_shape[0]
    num_points = int(feature_shape[1])

    edge_input = feature_input

    if num_points > 1:
        k = int(min(knn, num_points / atrous))
        if k > 1:
            nn_idx = tf_util.get_atrous_knn(adj_input, k, atrous, dist_input,
                                            radius_min, radius_max)
            edge_input = tf_util.get_edge_feature(feature_input, nn_idx, k)
        # else:
        # 	edge_input = feature_input

    net = tf_util.conv2d(edge_input,
                         num_output_channels,
                         kernel_size=kernel_size,
                         padding=padding,
                         stride=stride,
                         use_xavier=use_xavier,
                         stddev=stddev,
                         weight_decay=weight_decay,
                         activation_fn=activation_fn,
                         bn=bn,
                         is_training=is_training,
                         scope=scope,
                         bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keepdims=True)

    return net
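# Hedged illustration of the "atrous" idea only (an assumption about the intent of
# tf_util.get_atrous_knn, not its actual implementation): instead of the k nearest
# neighbours, keep every `atrous`-th neighbour from the sorted distance list, which
# widens the receptive field without increasing k.
import numpy as np

toy_dists = np.random.rand(1, 8, 8)          # toy B x N x N distance matrix
k, atrous = 3, 2
order = np.argsort(toy_dists, axis=-1)       # neighbours sorted by distance
dilated_idx = order[..., ::atrous][..., :k]  # every 2nd neighbour, keep k of them
print(dilated_idx.shape)                     # (1, 8, 3)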
Beispiel #29
def get_model(point_cloud, is_training, bn_decay=None):
  """ Classification PointNet, input is BxNx3, output Bx40 """
  batch_size = point_cloud.get_shape()[0].value
  num_point = point_cloud.get_shape()[1].value
  end_points = {}
  k = 20

  adj_matrix = tf_util.pairwise_distance(point_cloud)
  nn_idx = tf_util.knn(adj_matrix, k=k)
  edge_feature = tf_util.get_edge_feature(point_cloud, nn_idx=nn_idx, k=k)
  print(adj_matrix, nn_idx, edge_feature)  
  with tf.variable_scope('transform_net1') as sc:
    transform = input_transform_net(edge_feature, is_training, bn_decay, K=3)

  point_cloud_transformed = tf.matmul(point_cloud, transform)
  adj_matrix = tf_util.pairwise_distance(point_cloud_transformed)
  nn_idx = tf_util.knn(adj_matrix, k=k)
  edge_feature = tf_util.get_edge_feature(point_cloud_transformed, nn_idx=nn_idx, k=k)

  net = tf_util.conv2d(edge_feature, 64, [1,1],
                       padding='VALID', stride=[1,1],
                       bn=True, is_training=is_training,
                       scope='dgcnn1', bn_decay=bn_decay)
  net = tf.reduce_max(net, axis=-2, keep_dims=True)
  net1 = net

  adj_matrix = tf_util.pairwise_distance(net)
  nn_idx = tf_util.knn(adj_matrix, k=k)
  edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)

  net = tf_util.conv2d(edge_feature, 64, [1,1],
                       padding='VALID', stride=[1,1],
                       bn=True, is_training=is_training,
                       scope='dgcnn2', bn_decay=bn_decay)
  net = tf.reduce_max(net, axis=-2, keep_dims=True)
  net2 = net
 
  adj_matrix = tf_util.pairwise_distance(net)
  nn_idx = tf_util.knn(adj_matrix, k=k)
  edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)  

  net = tf_util.conv2d(edge_feature, 64, [1,1],
                       padding='VALID', stride=[1,1],
                       bn=True, is_training=is_training,
                       scope='dgcnn3', bn_decay=bn_decay)
  net = tf.reduce_max(net, axis=-2, keep_dims=True)
  net3 = net

  adj_matrix = tf_util.pairwise_distance(net)
  nn_idx = tf_util.knn(adj_matrix, k=k)
  edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)  
  
  net = tf_util.conv2d(edge_feature, 128, [1,1],
                       padding='VALID', stride=[1,1],
                       bn=True, is_training=is_training,
                       scope='dgcnn4', bn_decay=bn_decay)
  net = tf.reduce_max(net, axis=-2, keep_dims=True)
  net4 = net

  net = tf_util.conv2d(tf.concat([net1, net2, net3, net4], axis=-1), 1024, [1, 1], 
                       padding='VALID', stride=[1,1],
                       bn=True, is_training=is_training,
                       scope='agg', bn_decay=bn_decay)
 
  net = tf.reduce_max(net, axis=1, keep_dims=True) 

  # MLP on global point cloud vector
  end_points['post_max'] = net
  net = tf.reshape(net, [batch_size, -1]) 
  net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training,
                                scope='fc1', bn_decay=bn_decay)
  net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training,
                         scope='dp1')
  net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training,
                                scope='fc2', bn_decay=bn_decay)
  net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training,
                        scope='dp2')
  end_points['final'] = net
  net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')

  return net, end_points
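# Hedged usage sketch (not in the source): building and running the classifier once on
# random data. Assumes TensorFlow 1.x and that tf_util and input_transform_net are
# importable exactly as in the snippet above.
import numpy as np

with tf.Graph().as_default():
  pts_pl = tf.placeholder(tf.float32, shape=(8, 1024, 3))
  is_training_pl = tf.placeholder(tf.bool, shape=())
  logits, end_points = get_model(pts_pl, is_training_pl, bn_decay=None)
  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    out = sess.run(logits, feed_dict={pts_pl: np.random.rand(8, 1024, 3),
                                      is_training_pl: False})
    print(out.shape)  # (8, 40)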
Beispiel #30
def get_model(point_cloud, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    k = 20  # number of nearest neighbours

    # Compute pairwise distances, k-NN indices and edge features
    adj_matrix = tf_util.pairwise_distance(point_cloud)  # distances between all point pairs, B*N*N
    nn_idx = tf_util.knn(adj_matrix, k=k)  # B*N*20
    edge_feature = tf_util.get_edge_feature(point_cloud, nn_idx=nn_idx,
                                            k=k)  # inputs B*N*3 and B*N*20, output B*N*k*6

    with tf.variable_scope('transform_net1') as sc:
        transform = input_transform_net(edge_feature,
                                        is_training,
                                        bn_decay,
                                        K=3)  # B*3*3

    point_cloud_transformed = tf.matmul(point_cloud, transform)  # align the point cloud with the learned transform

    # dgcnn1
    adj_matrix = tf_util.pairwise_distance(
        point_cloud_transformed
    )  # input: batch_size x num_points x num_dims, output: 8*1024*1024
    nn_idx = tf_util.knn(adj_matrix, k=k)  # B*N*20
    edge_feature = tf_util.get_edge_feature(point_cloud_transformed,
                                            nn_idx=nn_idx,
                                            k=k)  # B*N*20*6=B*N*K*6

    # Feature extraction with a 1x1 kernel and 64 output channels
    net = tf_util.conv2d(
        edge_feature,
        64,
        [1, 1],  # B*N*K*64
        padding='VALID',
        stride=[1, 1],
        bn=True,
        is_training=is_training,
        scope='dgcnn1',
        bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)  # max over the k neighbours, B*N*1*64
    net1 = net

    # dgcnn2
    adj_matrix = tf_util.pairwise_distance(net)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx,
                                            k=k)  # B*N*K*128

    net = tf_util.conv2d(edge_feature,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='dgcnn2',
                         bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net2 = net

    # dgcnn3
    adj_matrix = tf_util.pairwise_distance(net)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)

    net = tf_util.conv2d(edge_feature,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='dgcnn3',
                         bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net3 = net

    # dgcnn4
    adj_matrix = tf_util.pairwise_distance(net)  # B*N*N
    nn_idx = tf_util.knn(adj_matrix, k=k)  # B*N*20
    edge_feature = tf_util.get_edge_feature(
        net, nn_idx=nn_idx, k=k)  # input net: B*N*1*64, output 8*1024*20*128

    net = tf_util.conv2d(
        edge_feature,
        128,
        [1, 1],  # 8*1024*20*128
        padding='VALID',
        stride=[1, 1],
        bn=True,
        is_training=is_training,
        scope='dgcnn4',
        bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)  # 8*1024*1*128
    net4 = net

    net = tf_util.conv2d(
        tf.concat([net1, net2, net3, net4], axis=-1),
        1024,
        [1, 1],  # 1024 output features, 1x1 kernel
        padding='VALID',
        stride=[1, 1],
        bn=True,
        is_training=is_training,
        scope='agg',
        bn_decay=bn_decay)  # 8*1024*1*1024

    net = tf.reduce_max(net, axis=1, keep_dims=True)  # 8*1*1*1024

    # MLP on global point cloud vector
    net = tf.reshape(net, [batch_size, -1])  #  8*1024
    net = tf_util.fully_connected(net,
                                  512,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc1',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.5,
                          is_training=is_training,
                          scope='dp1')
    net = tf_util.fully_connected(net,
                                  256,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc2',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.5,
                          is_training=is_training,
                          scope='dp2')
    net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')

    return net, end_points
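# Hedged follow-up sketch (assumption, not part of the original): turning the Bx40
# logits into predicted class ids and a batch accuracy, the usual evaluation step for
# a classifier like the one above.
def classification_metrics(logits, labels):
    # logits: B x 40 float scores, labels: B integer class ids
    preds = tf.argmax(logits, axis=-1)  # int64 predicted class ids
    correct = tf.cast(tf.equal(preds, tf.cast(labels, tf.int64)), tf.float32)
    accuracy = tf.reduce_mean(correct)
    return preds, accuracy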