Example #1
def get_edgeconv(point_cloud,
                 k,
                 mlp,
                 is_training,
                 bn_decay,
                 scope,
                 bn=True,
                 associated=None,
                 is_dist=False):
    with tf.variable_scope(scope) as sc:
        adj_matrix = tf_util.pairwise_distance(point_cloud)
        nn_idx = tf_util.knn(adj_matrix, k=k)

        if associated is not None:
            for j, feature in enumerate(associated):
                point_cloud = tf.concat([point_cloud, feature], axis=-1)

        edge_feature = tf_util.get_edge_feature(point_cloud,
                                                nn_idx=nn_idx,
                                                k=k)

        for i, num_out_channel in enumerate(mlp):
            edge_feature = tf_util.conv2d(edge_feature,
                                          num_out_channel, [1, 1],
                                          padding='VALID',
                                          stride=[1, 1],
                                          bn=bn,
                                          is_training=is_training,
                                          scope='conv%d' % (i),
                                          bn_decay=bn_decay,
                                          is_dist=is_dist)

        edge_feature = tf.reduce_max(edge_feature, axis=-2, keep_dims=True)

        return edge_feature
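A minimal usage sketch for the block above (illustrative only, not part of the original source; it assumes TF 1.x graph mode, that the DGCNN-style tf_util helpers are importable, and placeholder shapes of B=32, N=1024):

import tensorflow as tf

points = tf.placeholder(tf.float32, shape=(32, 1024, 3))        # B x N x 3
is_training_pl = tf.placeholder(tf.bool, shape=())
feat = get_edgeconv(points, k=20, mlp=[64, 64],
                    is_training=is_training_pl, bn_decay=None,
                    scope='edgeconv1')                           # -> B x N x 1 x 64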
Example #2
def get_sampled_edgeconv(point_cloud,
                         sampled_point_cloud,
                         k,
                         mlp,
                         is_training,
                         bn_decay,
                         scope,
                         bn=True,
                         is_dist=False):
    with tf.variable_scope(scope) as sc:
        adj_matrix = tf_util.sampled_pairwise_distance(sampled_point_cloud,
                                                       point_cloud)
        nn_idx = tf_util.knn(adj_matrix, k=k)

        edge_feature = tf_util.get_sampled_edge_feature(sampled_point_cloud,
                                                        point_cloud,
                                                        nn_idx=nn_idx,
                                                        k=k)

        for i, num_out_channel in enumerate(mlp):
            edge_feature = tf_util.conv2d(edge_feature,
                                          num_out_channel, [1, 1],
                                          padding='VALID',
                                          stride=[1, 1],
                                          bn=bn,
                                          is_training=is_training,
                                          scope='conv%d' % (i),
                                          bn_decay=bn_decay,
                                          is_dist=is_dist)

        edge_feature = tf.reduce_max(edge_feature, axis=-2, keep_dims=True)

        return edge_feature
Example #3
def get_model_old_4(point_cloud, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = point_cloud.get_shape()[0].value
    # point_cloud_2_momentum = tf.concat(
    #     [tf.multiply(point_cloud, point_cloud), tf.multiply(tf.roll(point_cloud, shift=1, axis=-1), point_cloud)],
    #     axis=-1)

    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    k = 20

    adj_matrix = tf_util.pairwise_distance(point_cloud)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.new_get_edge_feature(point_cloud, nn_idx=nn_idx, k=k)

    net = tf_util.conv2d(edge_feature, 128, [1, 1],
                         padding='VALID', stride=[1, 1],
                         bn=True, is_training=is_training,
                         scope='dgcnn1', bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)

    # MLP on global point cloud vector
    net = tf.reshape(net, [batch_size, -1])
    net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training,
                                  scope='fc1', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training,
                          scope='dp1')
    net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')

    return net, end_points
Example #4
def knn_loss(self, input_point_cloud, K=10):
    adj_matrix = tf_util.pairwise_distance(input_point_cloud)
    nn_idx = tf_util.knn(adj_matrix, k=K)
    knn_distances = tf_util.get_neighbor_distances(input_point_cloud,
                                                   nn_idx=nn_idx,
                                                   k=K)
    # Average over the point and neighbour dimensions (axes 1 and 2).
    knn_loss_val = tf.reduce_mean(knn_distances, [1, 2])
    return knn_loss_val
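The method above averages each point's K nearest-neighbour distances into one value per cloud, which can act as a density/uniformity penalty. Below is a self-contained sketch of the same quantity in plain TensorFlow ops, under the assumption that tf_util.pairwise_distance returns squared Euclidean distances and tf_util.knn selects the k smallest (as in the reference DGCNN tf_util); it is an illustration, not the original helper code.

import tensorflow as tf

def knn_mean_distance(points, k=10):
    # points: B x N x 3. Squared pairwise distances via ||x||^2 - 2*x.y + ||y||^2.
    inner = tf.matmul(points, points, transpose_b=True)              # B x N x N
    sq = tf.reduce_sum(tf.square(points), axis=-1, keep_dims=True)   # B x N x 1
    dists = sq - 2.0 * inner + tf.transpose(sq, perm=[0, 2, 1])      # B x N x N
    # top_k of the negated distances selects the k smallest distances
    # (each point's zero self-distance is included).
    neg_topk, _ = tf.nn.top_k(-dists, k=k)                           # B x N x k
    return tf.reduce_mean(-neg_topk, axis=[1, 2])                    # one value per cloud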
Example #5
def get_sampled_edgeconv_groupconv(point_cloud,
                                   sampled_point_cloud,
                                   k,
                                   mlp,
                                   is_training,
                                   bn_decay,
                                   scope,
                                   bn=True,
                                   sampled_pc=None,
                                   pc=None,
                                   is_dist=False):
    with tf.variable_scope(scope) as sc:
        adj_matrix = tf_util.sampled_pairwise_distance(sampled_point_cloud,
                                                       point_cloud)
        nn_idx = tf_util.knn(adj_matrix, k=k)

        if pc is not None:
            point_cloud = tf.concat(
                [point_cloud, tf.expand_dims(pc, axis=-2)], axis=-1)
        if sampled_pc is not None:
            sampled_point_cloud = tf.concat(
                [sampled_point_cloud,
                 tf.expand_dims(sampled_pc, axis=-2)],
                axis=-1)

        edge_feature, neighbors, concat = tf_util.get_sampled_edge_feature_separate(
            sampled_point_cloud, point_cloud, nn_idx=nn_idx, k=k)

        for i, num_out_channel in enumerate(mlp):
            edge_feature = tf_util.conv2d(edge_feature,
                                          num_out_channel // 2, [1, 1],
                                          padding='VALID',
                                          stride=[1, 1],
                                          bn=bn,
                                          is_training=is_training,
                                          scope='edge_conv%d' % (i),
                                          bn_decay=bn_decay,
                                          is_dist=is_dist)
            neighbors = tf_util.conv2d(neighbors,
                                       num_out_channel // 2, [1, 1],
                                       padding='VALID',
                                       stride=[1, 1],
                                       bn=bn,
                                       is_training=is_training,
                                       scope='neig_conv%d' % (i),
                                       bn_decay=bn_decay,
                                       is_dist=is_dist)

            net = channle_shuffle(edge_feature, neighbors)

            if i < len(mlp) - 1:
                ch = edge_feature.get_shape().as_list()[-1]
                edge_feature = net[:, :, :, 0:ch]
                neighbors = net[:, :, :, ch:]

        net = tf.reduce_max(net, axis=-2, keep_dims=True)

        return net
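channle_shuffle is referenced here (and in the next example) but not defined on this page. A minimal sketch of a ShuffleNet-style shuffle over two equal channel groups might look like the following; it is a generic illustration of the technique, and the name channel_shuffle_two_groups is hypothetical, not the original helper.

import tensorflow as tf

def channel_shuffle_two_groups(a, b):
    # a, b: B x N x k x C tensors with the same static shape.
    x = tf.concat([a, b], axis=-1)                  # B x N x k x 2C
    _, n, kk, c2 = x.get_shape().as_list()
    x = tf.reshape(x, [-1, n, kk, 2, c2 // 2])      # split channels into 2 groups
    x = tf.transpose(x, perm=[0, 1, 2, 4, 3])       # interleave the two groups
    return tf.reshape(x, [-1, n, kk, c2])           # B x N x k x 2C, shuffled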
Example #6
def get_edgeconv_groupconv(point_cloud,
                           k,
                           mlp,
                           is_training,
                           bn_decay,
                           scope,
                           bn=True,
                           associated=None):
    with tf.variable_scope(scope) as sc:
        adj_matrix = tf_util.pairwise_distance(point_cloud)
        nn_idx = tf_util.knn(adj_matrix, k=k)

        if associated is not None:
            for j, feature in enumerate(associated):
                point_cloud = tf.concat([point_cloud, feature], axis=-1)

        net = tf_util.get_edge_feature(point_cloud, nn_idx=nn_idx, k=k)

        for i, num_out_channel in enumerate(mlp):
            center, edge_feature = tf.split(net, num_or_size_splits=2, axis=-1)

            center = tf_util.conv2d(center,
                                    num_out_channel, [1, 1],
                                    padding='VALID',
                                    stride=[1, 1],
                                    bn=bn,
                                    is_training=is_training,
                                    scope='centerconv%d' % (i),
                                    bn_decay=bn_decay)

            edge_feature = tf_util.conv2d(edge_feature,
                                          num_out_channel, [1, 1],
                                          padding='VALID',
                                          stride=[1, 1],
                                          bn=bn,
                                          is_training=is_training,
                                          scope='edgeconv%d' % (i),
                                          bn_decay=bn_decay)

            net = channle_shuffle(center, edge_feature)

        edge_feature = tf.reduce_max(net, axis=-2, keep_dims=True)

        return edge_feature
Example #7
def same_label_topology(point_cloud, seg_label, k):
    # point_cloud: B x N x 3
    # seg_label:   B x N x 50 (e.g. one-hot part labels)
    seg_label = tf.to_float(seg_label)

    adj_seg = tf_util.pairwise_distance(seg_label)   # B x N x N, 0 for same-label pairs
    adj_pc = tf_util.pairwise_distance(point_cloud)  # B x N x N

    # Penalise cross-label pairs by the largest spatial distance in each cloud so that
    # same-label points are always preferred as neighbours. keep_dims keeps the result
    # at B x 1 x 1, which broadcasts correctly against the B x N x N adjacency.
    max_dist = tf.reduce_max(adj_pc, axis=[-2, -1], keep_dims=True)

    adj_sl = adj_pc + adj_seg * max_dist

    nn_idx = tf_util.knn(adj_sl, k=k)

    return nn_idx
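A hedged usage sketch (placeholder shapes are illustrative, not from the original source): the label-aware neighbour indices can be passed to tf_util.get_edge_feature so that edges are built preferentially from same-part neighbours.

import tensorflow as tf

pts = tf.placeholder(tf.float32, shape=(16, 2048, 3))             # B x N x 3
labels_onehot = tf.placeholder(tf.float32, shape=(16, 2048, 50))  # B x N x 50
nn_idx = same_label_topology(pts, labels_onehot, k=20)
edge_feature = tf_util.get_edge_feature(pts, nn_idx=nn_idx, k=20)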
Example #8
def get_model(points,
              n_iter,
              is_training,
              bn_decay,
              randinit=False,
              nostop=False,
              k=10):
    T = tf.eye(4, batch_shape=(points.shape[0], ))
    T_deltas = []
    for i in range(n_iter):
        transformed_points = tf_util.transform_points(points, T)
        if not nostop:
            transformed_points = tf.stop_gradient(transformed_points)

        adj_matrix = tf_util.pairwise_distance(transformed_points)
        nn_idx = tf_util.knn(adj_matrix, k=k)
        edge_feature = tf_util.get_edge_feature(points, nn_idx=nn_idx, k=k)

        qt = input_transform_net(edge_feature, is_training, bn_decay, randinit)
        T_delta = tf.map_fn(tf_util.qt2mat, qt, dtype=tf.float32)
        T_deltas.append(T_delta)
        T = tf.matmul(T_delta, T)
    transformed_points = tf_util.transform_points(points, T)
    return transformed_points, T, T_deltas
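A usage sketch for this iterative alignment model (shapes and wiring are illustrative assumptions, not from the original source): each iteration predicts parameters with input_transform_net, converts them to a 4x4 transform via tf_util.qt2mat, and left-multiplies the accumulated T.

import tensorflow as tf

src = tf.placeholder(tf.float32, shape=(8, 1024, 3))   # B x N x 3
is_training_pl = tf.placeholder(tf.bool, shape=())
aligned, T, T_deltas = get_model(src, n_iter=4,
                                 is_training=is_training_pl,
                                 bn_decay=None, k=10)
# aligned: B x N x 3 points after the accumulated transform; T: B x 4 x 4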
Example #9
def model_part(point_cloud, is_training, k, bn_decay=None):
      out1 = tf_util.conv2d(point_cloud, 64, [1,1],
                           padding='VALID', stride=[1,1],
                           bn=True, is_training=is_training,
                           scope='samp_conv1', bn_decay=bn_decay, is_dist=True)

      out2 = tf_util.conv2d(out1, 64, [1,1],
                           padding='VALID', stride=[1,1],
                           bn=True, is_training=is_training,
                           scope='samp_conv2', bn_decay=bn_decay, is_dist=True)

      net_max_1 = tf.reduce_max(out2, axis=-2, keep_dims=True)
      net_mean_1 = tf.reduce_mean(out2, axis=-2, keep_dims=True)

      out3 = tf_util.conv2d(tf.concat([net_max_1, net_mean_1], axis=-1), 64, [1,1],
                           padding='VALID', stride=[2,1],
                           bn=True, is_training=is_training,
                           scope='samp_conv3', bn_decay=bn_decay, is_dist=True)
      print("out3 = ", out3.shape)
      out3_max = tf.reduce_max(out3, axis=1, keep_dims=True)
      print("out3_max = ", out3_max.shape)
      adj = tf_util.pairwise_distance(tf.squeeze(out3, axis=-2))
      nn_idx = tf_util.knn(adj, k=k)
      edge_feature = tf_util.get_edge_feature(out3, nn_idx=nn_idx, k=k)

      out4 = tf_util.conv2d(edge_feature, 64, [1,1],
                           padding='VALID', stride=[1,1],
                           bn=True, is_training=is_training,
                           scope='samp_conv4', bn_decay=bn_decay, is_dist=True)

      net_max_2 = tf.reduce_max(out4, axis=-2, keep_dims=True)
      net_mean_2 = tf.reduce_mean(out4, axis=-2, keep_dims=True)

      out5 = tf_util.conv2d(tf.concat([net_max_2, net_mean_2], axis=-1), 64, [1,1],
                           padding='VALID', stride=[2,1],
                           bn=True, is_training=is_training,
                           scope='samp_conv5', bn_decay=bn_decay, is_dist=True)
      out5_max = tf.reduce_max(out5, axis=1, keep_dims=True)

      adj = tf_util.pairwise_distance(tf.squeeze(out5, axis=-2))
      nn_idx = tf_util.knn(adj, k=k)
      edge_feature = tf_util.get_edge_feature(out5, nn_idx=nn_idx, k=k)

      out6 = tf_util.conv2d(edge_feature, 64, [1,1],
                           padding='VALID', stride=[1,1],
                           bn=True, is_training=is_training,
                           scope='samp_conv6', bn_decay=bn_decay, is_dist=True)

      net_max_3 = tf.reduce_max(out6, axis=-2, keep_dims=True)
      net_mean_3 = tf.reduce_mean(out6, axis=-2, keep_dims=True)

      out7 = tf_util.conv2d(tf.concat([net_max_3, net_mean_3], axis=-1), 64, [1,1],
                           padding='VALID', stride=[2,1],
                           bn=True, is_training=is_training,
                           scope='samp_conv7', bn_decay=bn_decay, is_dist=True)
      out7_max = tf.reduce_max(out7, axis=1, keep_dims=True)

      out8 = tf_util.conv2d(out7, 1024, [1, 1],
                           padding='VALID', stride=[1,1],
                           bn=True, is_training=is_training,
                           scope='samp_conv8', bn_decay=bn_decay, is_dist=True)
      print("out8 = ", out8.shape)
      out_max = tf.reduce_max(out8, axis=1, keep_dims=True)
      print("out_max = ", out_max.shape)
      return out3_max, out5_max, out7_max, out_max
Example #10
def get_model(point_cloud,
              is_training,
              num_class,
              global_pl,
              params,
              weight_decay=None,
              bn_decay=None,
              scname=''):
    ''' input: BxNxF
    Uses https://arxiv.org/pdf/1902.08570 as the baseline.
    output: BxNx(cats*segms) '''
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    input_image = tf.expand_dims(point_cloud, -2)

    k = params[0]
    #adj = tf_util.pairwise_distance(point_cloud[:,:,:3])
    adj = tf_util.pairwise_distanceR(point_cloud[:, :, :3])
    n_heads = params[1]
    nn_idx = tf_util.knn(adj, k=k)

    net, locals_transform, coefs = gap_block(k, n_heads, nn_idx, point_cloud,
                                             point_cloud,
                                             ('filter0', params[2]), bn_decay,
                                             weight_decay, is_training, scname)

    net = tf_util.conv2d(net,
                         params[3], [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='gapnet00',
                         bn_decay=bn_decay)
    net00 = net

    net = tf_util.conv2d(net,
                         params[4], [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         activation_fn=tf.nn.relu,
                         bn=True,
                         is_training=is_training,
                         scope='gapnet01' + scname,
                         bn_decay=bn_decay)
    net01 = net

    net = tf_util.conv2d(net,
                         params[5], [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         activation_fn=tf.nn.relu,
                         bn=True,
                         is_training=is_training,
                         scope='gapnet02' + scname,
                         bn_decay=bn_decay)

    net02 = net

    adj_matrix = tf_util.pairwise_distance(net)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    adj_conv = nn_idx
    n_heads = params[6]

    net, locals_transform1, coefs2 = gap_block(k, n_heads, nn_idx, net,
                                               point_cloud,
                                               ('filter1', params[7]),
                                               bn_decay, weight_decay,
                                               is_training, scname)

    net = tf_util.conv2d(net,
                         params[8], [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='gapnet10',
                         bn_decay=bn_decay)
    net10 = net

    net = tf_util.conv2d(net,
                         params[9], [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         activation_fn=tf.nn.relu,
                         bn=True,
                         is_training=is_training,
                         scope='gapnet11' + scname,
                         bn_decay=bn_decay)
    net11 = net

    net = tf_util.conv2d(net,
                         params[10], [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         activation_fn=tf.nn.relu,
                         bn=True,
                         is_training=is_training,
                         scope='gapnet12' + scname,
                         bn_decay=bn_decay)

    net12 = net
    global_expand = tf.reshape(global_pl, [batch_size, 1, 1, -1])
    global_expand = tf.tile(global_expand, [1, num_point, 1, 1])
    global_expand = tf_util.conv2d(global_expand,
                                   16, [1, 1],
                                   padding='VALID',
                                   stride=[1, 1],
                                   bn=True,
                                   is_training=is_training,
                                   scope='global_expand' + scname,
                                   bn_decay=bn_decay)

    net = tf.concat([
        net00, net01, net02, net11, net12, global_expand, locals_transform,
        locals_transform1
    ],
                    axis=-1)

    net = tf_util.conv2d(net,
                         params[8], [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         activation_fn=tf.nn.relu,
                         bn=True,
                         is_training=is_training,
                         scope='agg' + scname,
                         bn_decay=bn_decay)
    #net_tot = net
    net = tf_util.avg_pool2d(net, [num_point, 1],
                             padding='VALID',
                             scope='avgpool' + scname)

    expand = tf.tile(net, [1, num_point, 1, 1])
    # net = tf.concat(axis=3, values=[expand,
    #                                 net_tot,
    #                             ])
    net = tf_util.conv2d(expand,
                         params[11], [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn_decay=bn_decay,
                         bn=True,
                         is_training=is_training,
                         scope='seg/conv2',
                         weight_decay=weight_decay,
                         is_dist=True)
    net = tf_util.dropout(net,
                          keep_prob=0.6,
                          is_training=is_training,
                          scope='seg/dp1')
    net = tf_util.conv2d(net,
                         params[11], [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn_decay=bn_decay,
                         bn=True,
                         is_training=is_training,
                         scope='seg/conv3',
                         weight_decay=weight_decay,
                         is_dist=True)
    net = tf_util.dropout(net,
                          keep_prob=0.6,
                          is_training=is_training,
                          scope='seg/dp2')
    net = tf_util.conv2d(net,
                         params[12], [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn_decay=bn_decay,
                         bn=True,
                         is_training=is_training,
                         scope='seg/conv4',
                         weight_decay=weight_decay,
                         is_dist=True)

    net = tf_util.conv2d(net,
                         num_class, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         activation_fn=None,
                         bn=False,
                         scope='seg/conv5',
                         weight_decay=weight_decay,
                         is_dist=True)

    net = tf.cond(is_training, lambda: net, lambda: tf.nn.softmax(net))

    net = tf.reshape(net, [batch_size, num_point, num_class])

    return net, coefs, coefs2, adj_conv
Example #11
def calc_ldgcnn_feature(point_cloud, is_training, bn_decay=None):
    # B: batch size; N: number of points; C: channels; k: number of nearest neighbors
    # point_cloud: B*N*3
    k = 20

    # adj_matrix: B*N*N
    adj_matrix = tf_util.pairwise_distance(point_cloud)
    # Find the indices of the k nearest neighbors.
    nn_idx = tf_util.knn(adj_matrix, k=k)

    point_cloud = tf.expand_dims(point_cloud, axis=-2)

    # edge_feature: B*N*k*6
    # The last dimension holds (Xc, Yc, Zc, Xck-Xc, Yck-Yc, Zck-Zc):
    # (Xc, Yc, Zc) is the central point and (Xck-Xc, Yck-Yc, Zck-Zc) is the edge vector.
    edge_feature = tf_util.get_edge_feature(point_cloud, nn_idx=nn_idx, k=k)

    # net: B*N*k*64
    # The kernel size is 1*1, so this is effectively a shared MLP applied to each edge.
    net = tf_util.conv2d(edge_feature,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='dgcnn1',
                         bn_decay=bn_decay)

    # net: B*N*1*64
    # Take the maximum over the k edge features.
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net1 = net

    # adj_matrix: B*N*N
    adj_matrix = tf_util.pairwise_distance(net)
    nn_idx = tf_util.knn(adj_matrix, k=k)

    # net: B*N*1*67
    # Concatenate the hierarchical features.
    net = tf.concat([point_cloud, net1], axis=-1)

    # edge_feature: B*N*k*134
    edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)

    # net: B*N*k*64
    net = tf_util.conv2d(edge_feature,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='dgcnn2',
                         bn_decay=bn_decay)
    # net: B*N*1*64
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net2 = net

    adj_matrix = tf_util.pairwise_distance(net)
    nn_idx = tf_util.knn(adj_matrix, k=k)

    # net: B*N*1*131
    net = tf.concat([point_cloud, net1, net2], axis=-1)

    # edge_feature: B*N*k*262
    edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)

    # net: B*N*k*64
    net = tf_util.conv2d(edge_feature,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='dgcnn3',
                         bn_decay=bn_decay)
    # net: B*N*1*64
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net3 = net

    adj_matrix = tf_util.pairwise_distance(net)
    nn_idx = tf_util.knn(adj_matrix, k=k)

    # net: B*N*1*195
    net = tf.concat([point_cloud, net1, net2, net3], axis=-1)
    # edge_feature: B*N*k*390
    edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)

    # net: B*N*k*128
    net = tf_util.conv2d(edge_feature,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='dgcnn4',
                         bn_decay=bn_decay)
    # net: B*N*1*128
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net4 = net

    # input: B*N*1*323
    # net: B*N*1*1024
    net = tf_util.conv2d(tf.concat([point_cloud, net1, net2, net3, net4],
                                   axis=-1),
                         1024, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='agg',
                         bn_decay=bn_decay)
    # net: B*1*1*1024
    net = tf.reduce_max(net, axis=1, keep_dims=True)
    # net: B*1024
    net = tf.squeeze(net)
    return net
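calc_ldgcnn_feature ends with a B x 1024 global descriptor. One possible classification head on top of it, mirroring the fully connected stacks used in the other examples on this page (the 512/256/40 layer sizes follow those examples and are not taken from this function):

def ldgcnn_classifier(global_feat, is_training, bn_decay=None, num_class=40):
    net = tf_util.fully_connected(global_feat, 512, bn=True, is_training=is_training,
                                  scope='fc1', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp1')
    net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training,
                                  scope='fc2', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp2')
    return tf_util.fully_connected(net, num_class, activation_fn=None, scope='fc3')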
Example #12
def get_model(point_cloud, is_training, bn_decay=None):
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    k = 20

    adj_matrix = tf_util.pairwise_distance(point_cloud)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    n_heads = 1
    attns = []
    local_features = []
    for i in range(n_heads):
        edge_feature, coefs, locals = attn_feature(point_cloud,
                                                   16,
                                                   nn_idx,
                                                   activation=tf.nn.elu,
                                                   in_dropout=0.6,
                                                   coef_dropout=0.6,
                                                   is_training=is_training,
                                                   bn_decay=bn_decay,
                                                   layer='layer0',
                                                   k=k,
                                                   i=i)
        attns.append(edge_feature)
        local_features.append(locals)
    neighbors_features = tf.concat(attns, axis=-1)
    neighbors_features = tf.concat(
        [tf.expand_dims(point_cloud, -2), neighbors_features], axis=-1)

    locals_max_transform = tf.reduce_max(tf.concat(local_features, axis=-1),
                                         axis=-2,
                                         keep_dims=True)

    with tf.variable_scope('transform_net1') as sc:
        transform = input_transform_net(neighbors_features,
                                        locals_max_transform,
                                        is_training,
                                        bn_decay,
                                        K=3)

    point_cloud_transformed = tf.matmul(point_cloud, transform)

    adj_matrix = tf_util.pairwise_distance(point_cloud_transformed)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    n_heads = 4
    attns = []
    local_features = []
    for i in range(n_heads):
        edge_feature, coefs, locals = attn_feature(point_cloud_transformed,
                                                   16,
                                                   nn_idx,
                                                   activation=tf.nn.elu,
                                                   in_dropout=0.6,
                                                   coef_dropout=0.6,
                                                   is_training=is_training,
                                                   bn_decay=bn_decay,
                                                   layer='layer1',
                                                   k=k,
                                                   i=i)
        attns.append(edge_feature)
        local_features.append(locals)

    neighbors_features = tf.concat(attns, axis=-1)
    neighbors_features = tf.concat(
        [tf.expand_dims(point_cloud_transformed, -2), neighbors_features],
        axis=-1)

    net = tf_util.conv2d(neighbors_features,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='gapnet1',
                         bn_decay=bn_decay)
    net1 = net

    locals_max = tf.reduce_max(tf.concat(local_features, axis=-1),
                               axis=-2,
                               keep_dims=True)

    net = tf_util.conv2d(net,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='gapnet2',
                         bn_decay=bn_decay)
    net2 = net

    net = tf_util.conv2d(net,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='gapnet3',
                         bn_decay=bn_decay)
    net3 = net

    net = tf_util.conv2d(net,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='gapnet4',
                         bn_decay=bn_decay)
    net4 = net

    net = tf_util.conv2d(tf.concat([net1, net2, net3, net4, locals_max],
                                   axis=-1),
                         1024, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='agg',
                         bn_decay=bn_decay)

    net = tf.reduce_max(net, axis=1, keep_dims=True)

    net = tf.reshape(net, [batch_size, -1])
    net = tf_util.fully_connected(net,
                                  512,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc1',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.5,
                          is_training=is_training,
                          scope='dp1')
    net = tf_util.fully_connected(net,
                                  256,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc2',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.5,
                          is_training=is_training,
                          scope='dp2')
    net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')

    return net, end_points
Example #13
def get_model(point_cloud, input_label, is_training, cat_num, part_num, \
              batch_size, num_point, weight_decay=.00004, bn_decay=None):
    bn_decay = bn_decay if bn_decay is not None else 0.9

    with tf.variable_scope("DGCNN"):
        batch_size = point_cloud.get_shape()[0].value
        num_point = point_cloud.get_shape()[1].value
        input_image = tf.expand_dims(point_cloud, -1)

        k = 20
        bn_params = {
            "is_training": is_training,
            "decay": bn_decay,
            'renorm': True
        }

        adj = tf_util.pairwise_distance(point_cloud)
        nn_idx = tf_util.knn(adj, k=k)
        edge_feature = tf_util.get_edge_feature(input_image,
                                                nn_idx=nn_idx,
                                                k=k)

        with tf.variable_scope('transform_net1') as sc:
            transform = input_transform_net(edge_feature,
                                            is_training,
                                            bn_decay,
                                            K=3)
        point_cloud_transformed = tf.matmul(point_cloud, transform)

        input_image = tf.expand_dims(point_cloud_transformed, -1)
        adj = tf_util.pairwise_distance(point_cloud_transformed)
        nn_idx = tf_util.knn(adj, k=k)
        edge_feature = tf_util.get_edge_feature(input_image,
                                                nn_idx=nn_idx,
                                                k=k)

        # out1 = tf_util.conv2d(edge_feature, 64, [1,1],
        #                      padding='VALID', stride=[1,1],
        #                      bn=True, is_training=is_training, weight_decay=weight_decay,
        #                      scope='adj_conv1', bn_decay=bn_decay, is_dist=True)
        out1 = layers.masked_conv2d(
            edge_feature,
            64,
            # max(int(round(64 * scale)), 32),
            [1, 1],
            padding='VALID',
            stride=1,
            normalizer_fn=slim.batch_norm,
            normalizer_params=bn_params,
            biases_initializer=tf.contrib.layers.xavier_initializer(),
            weights_regularizer=slim.l2_regularizer(weight_decay),
            activation_fn=tf.nn.relu6,
            weights_initializer=tf.contrib.layers.xavier_initializer(),
            scope='adj_conv1')

        # out2 = tf_util.conv2d(out1, 64, [1,1],
        #                      padding='VALID', stride=[1,1],
        #                      bn=True, is_training=is_training, weight_decay=weight_decay,
        #                      scope='adj_conv2', bn_decay=bn_decay, is_dist=True)
        out2 = layers.masked_conv2d(
            out1,
            64,
            # max(int(round(64 * scale)), 32),
            [1, 1],
            padding='VALID',
            stride=1,
            normalizer_fn=slim.batch_norm,
            normalizer_params=bn_params,
            biases_initializer=tf.contrib.layers.xavier_initializer(),
            weights_regularizer=slim.l2_regularizer(weight_decay),
            activation_fn=tf.nn.relu6,
            weights_initializer=tf.contrib.layers.xavier_initializer(),
            scope='adj_conv2')

        net_1 = tf.reduce_max(out2, axis=-2, keep_dims=True)

        adj = tf_util.pairwise_distance(net_1)
        nn_idx = tf_util.knn(adj, k=k)
        edge_feature = tf_util.get_edge_feature(net_1, nn_idx=nn_idx, k=k)

        # out3 = tf_util.conv2d(edge_feature, 64, [1,1],
        #                      padding='VALID', stride=[1,1],
        #                      bn=True, is_training=is_training, weight_decay=weight_decay,
        #                      scope='adj_conv3', bn_decay=bn_decay, is_dist=True)
        out3 = layers.masked_conv2d(
            edge_feature,
            64,
            # max(int(round(64 * scale)), 32),
            [1, 1],
            padding='VALID',
            stride=1,
            normalizer_fn=slim.batch_norm,
            normalizer_params=bn_params,
            biases_initializer=tf.contrib.layers.xavier_initializer(),
            weights_regularizer=slim.l2_regularizer(weight_decay),
            activation_fn=tf.nn.relu6,
            weights_initializer=tf.contrib.layers.xavier_initializer(),
            scope='adj_conv3')

        # out4 = tf_util.conv2d(out3, 64, [1,1],
        #                      padding='VALID', stride=[1,1],
        #                      bn=True, is_training=is_training, weight_decay=weight_decay,
        #                      scope='adj_conv4', bn_decay=bn_decay, is_dist=True)
        out4 = layers.masked_conv2d(
            out3,
            64,
            # max(int(round(64 * scale)), 32),
            [1, 1],
            padding='VALID',
            stride=1,
            normalizer_fn=slim.batch_norm,
            normalizer_params=bn_params,
            biases_initializer=tf.contrib.layers.xavier_initializer(),
            weights_regularizer=slim.l2_regularizer(weight_decay),
            activation_fn=tf.nn.relu6,
            weights_initializer=tf.contrib.layers.xavier_initializer(),
            scope='adj_conv4')

        net_2 = tf.reduce_max(out4, axis=-2, keep_dims=True)

        adj = tf_util.pairwise_distance(net_2)
        nn_idx = tf_util.knn(adj, k=k)
        edge_feature = tf_util.get_edge_feature(net_2, nn_idx=nn_idx, k=k)

        # out5 = tf_util.conv2d(edge_feature, 64, [1,1],
        #                      padding='VALID', stride=[1,1],
        #                      bn=True, is_training=is_training, weight_decay=weight_decay,
        #                      scope='adj_conv5', bn_decay=bn_decay, is_dist=True)
        out5 = layers.masked_conv2d(
            edge_feature,
            64,
            # max(int(round(64 * scale)), 32),
            [1, 1],
            padding='VALID',
            stride=1,
            normalizer_fn=slim.batch_norm,
            normalizer_params=bn_params,
            biases_initializer=tf.contrib.layers.xavier_initializer(),
            weights_regularizer=slim.l2_regularizer(weight_decay),
            activation_fn=tf.nn.relu6,
            weights_initializer=tf.contrib.layers.xavier_initializer(),
            scope='adj_conv5')

        # out6 = tf_util.conv2d(out5, 64, [1,1],
        #                      padding='VALID', stride=[1,1],
        #                      bn=True, is_training=is_training, weight_decay=weight_decay,
        #                      scope='adj_conv6', bn_decay=bn_decay, is_dist=True)

        net_3 = tf.reduce_max(out5, axis=-2, keep_dims=True)

        # out7 = tf_util.conv2d(tf.concat([net_1, net_2, net_3], axis=-1), 1024, [1, 1],
        #                      padding='VALID', stride=[1,1],
        #                      bn=True, is_training=is_training,
        #                      scope='adj_conv7', bn_decay=bn_decay, is_dist=True)
        out7 = layers.masked_conv2d(
            tf.concat([net_1, net_2, net_3], axis=-1),
            1024,
            # max(int(round(64 * scale)), 32),
            [1, 1],
            padding='VALID',
            stride=1,
            normalizer_fn=slim.batch_norm,
            normalizer_params=bn_params,
            biases_initializer=tf.contrib.layers.xavier_initializer(),
            weights_regularizer=slim.l2_regularizer(weight_decay),
            activation_fn=tf.nn.relu6,
            weights_initializer=tf.contrib.layers.xavier_initializer(),
            scope='adj_conv7')

        # out_max = tf_util.max_pool2d(out7, [num_point, 1], padding='VALID', scope='maxpool')
        out_max = slim.max_pool2d(out7, [num_point, 1],
                                  stride=1,
                                  padding='VALID',
                                  scope='maxpool')

        one_hot_label_expand = tf.reshape(input_label,
                                          [batch_size, 1, 1, cat_num])
        # one_hot_label_expand = tf_util.conv2d(one_hot_label_expand, 64, [1, 1],
        #                      padding='VALID', stride=[1,1],
        #                      bn=True, is_training=is_training,
        #                      scope='one_hot_label_expand', bn_decay=bn_decay, is_dist=True)
        one_hot_label_expand = layers.masked_conv2d(
            one_hot_label_expand,
            64,
            # max(int(round(64 * scale)), 32),
            [1, 1],
            padding='VALID',
            stride=1,
            normalizer_fn=slim.batch_norm,
            normalizer_params=bn_params,
            biases_initializer=tf.contrib.layers.xavier_initializer(),
            weights_regularizer=slim.l2_regularizer(weight_decay),
            activation_fn=tf.nn.relu6,
            weights_initializer=tf.contrib.layers.xavier_initializer(),
            scope='one_hot_label_expand')
        out_max = tf.concat(axis=3, values=[out_max, one_hot_label_expand])
        expand = tf.tile(out_max, [1, num_point, 1, 1])

        concat = tf.concat(axis=3, values=[expand, net_1, net_2, net_3])

        # net2 = tf_util.conv2d(concat, 256, [1,1], padding='VALID', stride=[1,1], bn_decay=bn_decay,
        #           bn=True, is_training=is_training, scope='seg/conv1', weight_decay=weight_decay, is_dist=True)
        net2 = layers.masked_conv2d(
            concat,
            256,
            # max(int(round(64 * scale)), 32),
            [1, 1],
            padding='VALID',
            stride=1,
            normalizer_fn=slim.batch_norm,
            normalizer_params=bn_params,
            biases_initializer=tf.contrib.layers.xavier_initializer(),
            weights_regularizer=slim.l2_regularizer(weight_decay),
            activation_fn=tf.nn.relu6,
            weights_initializer=tf.contrib.layers.xavier_initializer(),
            scope='seg/conv1')
        # net2 = tf_util.dropout(net2, keep_prob=0.6, is_training=is_training, scope='seg/dp1')
        net2 = slim.dropout(net2,
                            keep_prob=0.6,
                            is_training=is_training,
                            scope='seg/dp1')
        # net2 = tf_util.conv2d(net2, 256, [1,1], padding='VALID', stride=[1,1], bn_decay=bn_decay,
        #           bn=True, is_training=is_training, scope='seg/conv2', weight_decay=weight_decay, is_dist=True)
        net2 = layers.masked_conv2d(
            net2,
            256,
            # max(int(round(64 * scale)), 32),
            [1, 1],
            padding='VALID',
            stride=1,
            normalizer_fn=slim.batch_norm,
            normalizer_params=bn_params,
            biases_initializer=tf.contrib.layers.xavier_initializer(),
            weights_regularizer=slim.l2_regularizer(weight_decay),
            activation_fn=tf.nn.relu6,
            weights_initializer=tf.contrib.layers.xavier_initializer(),
            scope='seg/conv2')
        # net2 = tf_util.dropout(net2, keep_prob=0.6, is_training=is_training, scope='seg/dp2')
        net2 = slim.dropout(net2,
                            keep_prob=0.6,
                            is_training=is_training,
                            scope='seg/dp2')
        # net2 = tf_util.conv2d(net2, 128, [1,1], padding='VALID', stride=[1,1], bn_decay=bn_decay,
        #           bn=True, is_training=is_training, scope='seg/conv3', weight_decay=weight_decay, is_dist=True)
        net2 = layers.masked_conv2d(
            net2,
            128,
            # max(int(round(64 * scale)), 32),
            [1, 1],
            padding='VALID',
            stride=1,
            normalizer_fn=slim.batch_norm,
            normalizer_params=bn_params,
            biases_initializer=tf.contrib.layers.xavier_initializer(),
            weights_regularizer=slim.l2_regularizer(weight_decay),
            activation_fn=tf.nn.relu6,
            weights_initializer=tf.contrib.layers.xavier_initializer(),
            scope='seg/conv3')
        # net2 = tf_util.conv2d(net2, part_num, [1,1], padding='VALID', stride=[1,1], activation_fn=None,
        #           bn=False, scope='seg/conv4', weight_decay=weight_decay, is_dist=True)
        net2 = layers.masked_conv2d(
            net2,
            part_num,
            # max(int(round(64 * scale)), 32),
            [1, 1],
            padding='VALID',
            stride=1,
            normalizer_fn=None,
            # normalizer_params=bn_params,
            biases_initializer=tf.contrib.layers.xavier_initializer(),
            weights_regularizer=slim.l2_regularizer(weight_decay),
            activation_fn=None,
            weights_initializer=tf.contrib.layers.xavier_initializer(),
            scope='seg/conv4')

        net2 = tf.reshape(net2, [batch_size, num_point, part_num])

    return net2
Example #14
def get_model(point_cloud, input_label, is_training, cat_num, part_num, \
        batch_size, num_point, weight_decay, graphnum, featnum,  bn_decay=None):
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    input_image = tf.expand_dims(point_cloud, -1)

    k = 30

    adj = tf_util.pairwise_distance(point_cloud)
    nn_idx = tf_util.knn(adj, k=k)
    edge_feature = tf_util.get_edge_feature(input_image, nn_idx=nn_idx, k=k)

    with tf.variable_scope('transform_net1') as sc:
        transform = input_transform_net(edge_feature,
                                        is_training,
                                        bn_decay,
                                        K=3,
                                        is_dist=True)
    point_cloud_transformed = tf.matmul(point_cloud, transform)
    input_image = tf.expand_dims(point_cloud_transformed, -2)
    adj = tf_util.pairwise_distance(point_cloud_transformed)
    nn_idx = tf_util.knn(adj, k=k)
    edge_feature = tf_util.get_edge_feature(input_image, nn_idx=nn_idx, k=k)

    out1 = tf_util.conv2d(edge_feature,
                          64, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn=True,
                          is_training=is_training,
                          weight_decay=weight_decay,
                          scope='adj_conv1',
                          bn_decay=bn_decay,
                          is_dist=True)

    out2 = tf_util.conv2d(out1,
                          64, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn=True,
                          is_training=is_training,
                          weight_decay=weight_decay,
                          scope='adj_conv2',
                          bn_decay=bn_decay,
                          is_dist=True)

    net_max_1 = tf.reduce_max(out2, axis=-2, keep_dims=True)
    net_mean_1 = tf.reduce_mean(out2, axis=-2, keep_dims=True)

    out3 = tf_util.conv2d(tf.concat([net_max_1, net_mean_1], axis=-1),
                          64, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn=True,
                          is_training=is_training,
                          weight_decay=weight_decay,
                          scope='adj_conv3',
                          bn_decay=bn_decay,
                          is_dist=True)

    out3A, net_max_1A, net_mean_1A = tf_util.offset_deform(
        input_image,
        out3,
        scope="trans_conv0",
        num_neighbor=k,
        num_graph=graphnum[0],
        num_feat=featnum[0],
        weight_decay=weight_decay,
        is_training=is_training,
        bn_decay=bn_decay)

    out5, net_max_2, net_mean_2 = tf_util.offset_deform(
        input_image,
        out3A,
        scope="trans_conv1",
        num_neighbor=k,
        num_graph=graphnum[0],
        num_feat=featnum[0],
        weight_decay=weight_decay,
        is_training=is_training,
        bn_decay=bn_decay)

    out7, net_max_3, net_mean_3 = tf_util.offset_deform(
        input_image,
        out5,
        scope="trans_conv2",
        num_neighbor=k,
        num_graph=graphnum[1],
        num_feat=featnum[1],
        weight_decay=weight_decay,
        is_training=is_training,
        bn_decay=bn_decay)
    '''adj = tf_util.pairwise_distance(tf.squeeze(trans2, axis=-2))
    nn_idx = tf_util.knn(adj, k=k)
    edge_feature = tf_util.get_edge_feature(tf.concat([out5,trans2], axis = -1), nn_idx=nn_idx, k=k)

    out6 = tf_util.conv2d(edge_feature, 64, [1,1],
                                             padding='VALID', stride=[1,1],
                                             bn=True, is_training=is_training, weight_decay=weight_decay,
                                             scope='adj_conv6', bn_decay=bn_decay, is_dist=True)

    net_max_3 = tf.reduce_max(out6, axis=-2, keep_dims=True)
    net_mean_3 = tf.reduce_mean(out6, axis=-2, keep_dims=True)

    out7 = tf_util.conv2d(tf.concat([net_max_3, net_mean_3], axis=-1), 64, [1,1],
                                             padding='VALID', stride=[1,1],
                                             bn=True, is_training=is_training, weight_decay=weight_decay,
                                             scope='adj_conv7', bn_decay=bn_decay, is_dist=True)'''

    out8 = tf_util.conv2d(tf.concat([out3, out5, out7], axis=-1),
                          1024, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn=True,
                          is_training=is_training,
                          scope='adj_conv13',
                          bn_decay=bn_decay,
                          is_dist=True)

    out_max = tf_util.max_pool2d(out8, [num_point, 1],
                                 padding='VALID',
                                 scope='maxpool')

    one_hot_label_expand = tf.reshape(input_label, [batch_size, 1, 1, cat_num])
    one_hot_label_expand = tf_util.conv2d(one_hot_label_expand,
                                          128, [1, 1],
                                          padding='VALID',
                                          stride=[1, 1],
                                          bn=True,
                                          is_training=is_training,
                                          scope='one_hot_label_expand',
                                          bn_decay=bn_decay,
                                          is_dist=True)
    out_max = tf.concat(axis=3, values=[out_max, one_hot_label_expand])
    expand = tf.tile(out_max, [1, num_point, 1, 1])

    concat = tf.concat(axis=3,
                       values=[
                           expand, net_max_1, net_mean_1, out3, net_max_2,
                           net_mean_2, out5, net_max_3, net_mean_3, out7, out8
                       ])

    net2 = tf_util.conv2d(concat,
                          256, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn_decay=bn_decay,
                          bn=True,
                          is_training=is_training,
                          scope='seg/conv1',
                          weight_decay=weight_decay,
                          is_dist=True)
    net2 = tf_util.dropout(net2,
                           keep_prob=0.6,
                           is_training=is_training,
                           scope='seg/dp1')
    net2 = tf_util.conv2d(net2,
                          256, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn_decay=bn_decay,
                          bn=True,
                          is_training=is_training,
                          scope='seg/conv2',
                          weight_decay=weight_decay,
                          is_dist=True)
    net2 = tf_util.dropout(net2,
                           keep_prob=0.6,
                           is_training=is_training,
                           scope='seg/dp2')
    net2 = tf_util.conv2d(net2,
                          128, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn_decay=bn_decay,
                          bn=True,
                          is_training=is_training,
                          scope='seg/conv3',
                          weight_decay=weight_decay,
                          is_dist=True)
    net2 = tf_util.conv2d(net2,
                          part_num, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          activation_fn=None,
                          bn=False,
                          scope='seg/conv4',
                          weight_decay=weight_decay,
                          is_dist=True)

    net2 = tf.reshape(net2, [batch_size, num_point, part_num])

    return net2
Example #15
def get_model(point_cloud,
              is_training,
              num_class,
              weight_decay=None,
              bn_decay=None,
              scname=''):
    ''' input: BxNxF
    output: BxNx(cats*segms) '''
    batch_size = point_cloud.get_shape()[0]
    num_point = point_cloud.get_shape()[1]
    num_feat = point_cloud.get_shape()[2]

    k = 10
    adj = tf_util.pairwise_distanceR(point_cloud[:, :, :3])
    n_heads = 1
    nn_idx = tf_util.knn(adj, k=k)

    net, locals_transform, coefs = gap_block(k, n_heads, nn_idx, point_cloud,
                                             point_cloud, ('filter0', 16),
                                             bn_decay, weight_decay,
                                             is_training, scname)

    net = tf_util.conv2d(net,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         activation_fn=tf.nn.relu,
                         bn=True,
                         is_training=is_training,
                         scope='gapnet01' + scname,
                         bn_decay=bn_decay)
    net01 = net

    net = tf_util.conv2d(net,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         activation_fn=tf.nn.relu,
                         bn=True,
                         is_training=is_training,
                         scope='gapnet02' + scname,
                         bn_decay=bn_decay)

    net02 = net
    adj_matrix = tf_util.pairwise_distance(net)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    adj_conv = nn_idx
    n_heads = 1

    net, locals_transform1, coefs2 = gap_block(k, n_heads, nn_idx, net,
                                               point_cloud, ('filter1', 128),
                                               bn_decay, weight_decay,
                                               is_training, scname)

    net = tf_util.conv2d(net,
                         256, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         activation_fn=tf.nn.relu,
                         bn=True,
                         is_training=is_training,
                         scope='gapnet11' + scname,
                         bn_decay=bn_decay)
    net11 = net

    net = tf_util.conv2d(net,
                         256, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         activation_fn=tf.nn.relu,
                         bn=True,
                         is_training=is_training,
                         scope='gapnet12' + scname,
                         bn_decay=bn_decay)

    net12 = net

    net = tf.concat(
        [net01, net02, net11, net12, locals_transform, locals_transform1],
        axis=-1)

    net = tf_util.conv2d(net,
                         3, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         activation_fn=tf.nn.relu,
                         bn=True,
                         is_training=is_training,
                         scope='agg' + scname,
                         bn_decay=bn_decay)

    net = tf_util.avg_pool2d(net, [num_point, 1],
                             padding='VALID',
                             scope='avgpool' + scname)
    max_pool = net

    net = tf.reshape(net, [batch_size, -1])
    net = tf_util.fully_connected(net,
                                  256,
                                  bn=True,
                                  is_training=is_training,
                                  activation_fn=tf.nn.relu,
                                  scope='fc1' + scname,
                                  bn_decay=bn_decay)
    net = tf_util.fully_connected(net,
                                  128,
                                  bn=True,
                                  is_training=is_training,
                                  activation_fn=tf.nn.relu,
                                  scope='fc2' + scname,
                                  bn_decay=bn_decay)
    net = tf_util.fully_connected(net,
                                  num_class,
                                  activation_fn=None,
                                  scope='fc3' + scname)

    net = tf.squeeze(net)

    return net, max_pool
Ejemplo n.º 16
0
def get_model(point_cloud, is_training, bn_decay=None):
  """ Classification PointNet, input is BxNx3, output Bx40 """
  batch_size = point_cloud.get_shape()[0].value
  num_point = point_cloud.get_shape()[1].value
  end_points = {}
  k = 20

  adj_matrix = tf_util.pairwise_distance(point_cloud)
  nn_idx = tf_util.knn(adj_matrix, k=k)
  edge_feature = tf_util.get_edge_feature(point_cloud, nn_idx=nn_idx, k=k)
  print(adj_matrix, nn_idx, edge_feature)  
  with tf.variable_scope('transform_net1') as sc:
    transform = input_transform_net(edge_feature, is_training, bn_decay, K=3)

  point_cloud_transformed = tf.matmul(point_cloud, transform)
  adj_matrix = tf_util.pairwise_distance(point_cloud_transformed)
  nn_idx = tf_util.knn(adj_matrix, k=k)
  edge_feature = tf_util.get_edge_feature(point_cloud_transformed, nn_idx=nn_idx, k=k)

  net = tf_util.conv2d(edge_feature, 64, [1,1],
                       padding='VALID', stride=[1,1],
                       bn=True, is_training=is_training,
                       scope='dgcnn1', bn_decay=bn_decay)
  net = tf.reduce_max(net, axis=-2, keep_dims=True)
  net1 = net

  adj_matrix = tf_util.pairwise_distance(net)
  nn_idx = tf_util.knn(adj_matrix, k=k)
  edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)

  net = tf_util.conv2d(edge_feature, 64, [1,1],
                       padding='VALID', stride=[1,1],
                       bn=True, is_training=is_training,
                       scope='dgcnn2', bn_decay=bn_decay)
  net = tf.reduce_max(net, axis=-2, keep_dims=True)
  net2 = net
 
  adj_matrix = tf_util.pairwise_distance(net)
  nn_idx = tf_util.knn(adj_matrix, k=k)
  edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)  

  net = tf_util.conv2d(edge_feature, 64, [1,1],
                       padding='VALID', stride=[1,1],
                       bn=True, is_training=is_training,
                       scope='dgcnn3', bn_decay=bn_decay)
  net = tf.reduce_max(net, axis=-2, keep_dims=True)
  net3 = net

  adj_matrix = tf_util.pairwise_distance(net)
  nn_idx = tf_util.knn(adj_matrix, k=k)
  edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)  
  
  net = tf_util.conv2d(edge_feature, 128, [1,1],
                       padding='VALID', stride=[1,1],
                       bn=True, is_training=is_training,
                       scope='dgcnn4', bn_decay=bn_decay)
  net = tf.reduce_max(net, axis=-2, keep_dims=True)
  net4 = net

  net = tf_util.conv2d(tf.concat([net1, net2, net3, net4], axis=-1), 1024, [1, 1], 
                       padding='VALID', stride=[1,1],
                       bn=True, is_training=is_training,
                       scope='agg', bn_decay=bn_decay)
 
  net = tf.reduce_max(net, axis=1, keep_dims=True) 

  # MLP on global point cloud vector
  end_points['post_max'] = net
  net = tf.reshape(net, [batch_size, -1]) 
  net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training,
                                scope='fc1', bn_decay=bn_decay)
  net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training,
                         scope='dp1')
  net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training,
                                scope='fc2', bn_decay=bn_decay)
  net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training,
                        scope='dp2')
  end_points['final'] = net
  net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')

  return net, end_points
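
A minimal usage sketch for the model above, assuming tf_util and transform_nets from the DGCNN codebase are on the Python path and that the function is saved in a module called dgcnn_cls (a hypothetical name). It only wires up placeholders matching the docstring (BxNx3 in, Bx40 logits out) and runs one forward pass on random data.

import numpy as np
import tensorflow as tf

from dgcnn_cls import get_model  # hypothetical module holding the snippet above

BATCH_SIZE, NUM_POINT = 8, 1024

with tf.Graph().as_default():
    points_pl = tf.placeholder(tf.float32, shape=(BATCH_SIZE, NUM_POINT, 3))
    is_training_pl = tf.placeholder(tf.bool, shape=())

    # Build the DGCNN graph: logits are Bx40, end_points keeps intermediate tensors.
    logits, end_points = get_model(points_pl, is_training_pl, bn_decay=None)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        feed = {points_pl: np.random.rand(BATCH_SIZE, NUM_POINT, 3).astype(np.float32),
                is_training_pl: False}
        print(sess.run(logits, feed_dict=feed).shape)  # expected: (8, 40)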
Ejemplo n.º 17
0
    def get_model_w_ae_gcn(self, point_cloud, is_training, bn_decay=None):
        """ Classification PointNet, input is BxNx3, output Bx40 """
        tf_util = imp.load_source(
            'tf_util',
            os.path.join(os.path.dirname(self.models["test"]), '../utils',
                         "tf_util.py"))
        transform_nets = imp.load_source(
            'transform_nets',
            os.path.join(os.path.dirname(self.models["test"]),
                         "transform_nets.py"))
        import tf_util
        from transform_nets import input_transform_net
        batch_size = self.configuration.batch_size
        num_point = self.configuration.n_input[0]
        end_points = {}
        k = 20

        adj_matrix = tf_util.pairwise_distance(point_cloud)
        nn_idx = tf_util.knn(adj_matrix, k=k)
        edge_feature = tf_util.get_edge_feature(point_cloud,
                                                nn_idx=nn_idx,
                                                k=k)
        print(adj_matrix, nn_idx, edge_feature)
        with tf.variable_scope('transform_net1') as sc:
            transform = input_transform_net(edge_feature,
                                            is_training,
                                            bn_decay,
                                            K=3)

        point_cloud_transformed = tf.matmul(point_cloud, transform)
        adj_matrix = tf_util.pairwise_distance(point_cloud_transformed)
        nn_idx = tf_util.knn(adj_matrix, k=k)
        edge_feature = tf_util.get_edge_feature(point_cloud_transformed,
                                                nn_idx=nn_idx,
                                                k=k)

        net = tf_util.conv2d(edge_feature,
                             64, [1, 1],
                             padding='VALID',
                             stride=[1, 1],
                             bn=True,
                             is_training=is_training,
                             scope='dgcnn1',
                             bn_decay=bn_decay)
        net = tf.reduce_max(net, axis=-2, keep_dims=True)
        net1 = net

        adj_matrix = tf_util.pairwise_distance(net)
        nn_idx = tf_util.knn(adj_matrix, k=k)
        edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)

        net = tf_util.conv2d(edge_feature,
                             64, [1, 1],
                             padding='VALID',
                             stride=[1, 1],
                             bn=True,
                             is_training=is_training,
                             scope='dgcnn2',
                             bn_decay=bn_decay)
        net = tf.reduce_max(net, axis=-2, keep_dims=True)
        net2 = net

        adj_matrix = tf_util.pairwise_distance(net)
        nn_idx = tf_util.knn(adj_matrix, k=k)
        edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)

        net = tf_util.conv2d(edge_feature,
                             64, [1, 1],
                             padding='VALID',
                             stride=[1, 1],
                             bn=True,
                             is_training=is_training,
                             scope='dgcnn3',
                             bn_decay=bn_decay)
        net = tf.reduce_max(net, axis=-2, keep_dims=True)
        net3 = net

        adj_matrix = tf_util.pairwise_distance(net)
        nn_idx = tf_util.knn(adj_matrix, k=k)
        edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)

        net = tf_util.conv2d(edge_feature,
                             128, [1, 1],
                             padding='VALID',
                             stride=[1, 1],
                             bn=True,
                             is_training=is_training,
                             scope='dgcnn4',
                             bn_decay=bn_decay)
        net = tf.reduce_max(net, axis=-2, keep_dims=True)
        net4 = net

        net = tf_util.conv2d(tf.concat([net1, net2, net3, net4], axis=-1),
                             1024, [1, 1],
                             padding='VALID',
                             stride=[1, 1],
                             bn=True,
                             is_training=is_training,
                             scope='agg',
                             bn_decay=bn_decay)

        net = tf.reduce_max(net, axis=1, keep_dims=True)

        # MLP on global point cloud vector
        net = tf.reshape(net, [batch_size, -1])
        end_points['post_max'] = net
        net = tf_util.fully_connected(net,
                                      512,
                                      bn=True,
                                      is_training=is_training,
                                      scope='fc1',
                                      bn_decay=bn_decay)
        net = tf_util.dropout(net,
                              keep_prob=0.5,
                              is_training=is_training,
                              scope='dp1')
        net = tf_util.fully_connected(net,
                                      256,
                                      bn=True,
                                      is_training=is_training,
                                      scope='fc2',
                                      bn_decay=bn_decay)
        net = tf_util.dropout(net,
                              keep_prob=0.5,
                              is_training=is_training,
                              scope='dp2')
        end_points['final'] = net
        net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')

        return net, end_points
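
The imp.load_source calls above are what make the later plain imports resolve to the project's own copies of tf_util and transform_nets: load_source registers the file under the given module name in sys.modules. A small stand-alone sketch of that pattern, with hypothetical directory names:

import imp
import os
import sys

# Hypothetical locations; the method above derives them from self.models["test"].
UTILS_DIR = '/path/to/project/utils'       # contains tf_util.py
MODELS_DIR = '/path/to/project/models'     # contains transform_nets.py

# imp.load_source compiles the file and registers it in sys.modules under the
# given name, so the ordinary imports below pick up exactly these files even if
# another tf_util happens to sit earlier on sys.path.
imp.load_source('tf_util', os.path.join(UTILS_DIR, 'tf_util.py'))
imp.load_source('transform_nets', os.path.join(MODELS_DIR, 'transform_nets.py'))

import tf_util
from transform_nets import input_transform_net

print('tf_util loaded from:', sys.modules['tf_util'].__file__)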
Ejemplo n.º 18
0
def forward(point_cloud, is_training, bn_decay=None):
    """LPD-Net:FNSF,    INPUT is batch_num_queries X num_pointclouds_per_query X num_points_per_pointcloud X 13,
                        OUTPUT batch_num_queries X num_pointclouds_per_query X output_dim """
    batch_num_queries = point_cloud.get_shape()[0].value
    num_pointclouds_per_query = point_cloud.get_shape()[1].value
    num_points = point_cloud.get_shape()[2].value
    CLUSTER_SIZE=64
    OUTPUT_DIM=256
    k=20
    point_cloud = tf.reshape(point_cloud, [batch_num_queries*num_pointclouds_per_query, num_points,13])

    point_cloud, feature_cloud = tf.split(point_cloud, [3,10], 2)

    with tf.variable_scope('transform_net1') as sc:
        input_transform = input_transform_net(point_cloud, is_training, bn_decay, K=3)
    point_cloud_transformed = tf.matmul(point_cloud, input_transform)

    # Neural Network to learn neighborhood features
    # feature_cloud = neural_feature_net(point_cloud, is_training, bn_decay, knn_k=20, F=10)

    point_cloud_input = tf.concat([point_cloud_transformed, feature_cloud], 2)

    point_cloud_input = tf.expand_dims(point_cloud_input, -1)

    net = tf_util.conv2d(point_cloud_input, 64, [1, 13],
                         padding='VALID', stride=[1,1],
                         is_training=is_training,
                         scope='conv1', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 64, [1, 1],
                         padding='VALID', stride=[1,1],
                         is_training=is_training,
                         scope='conv2', bn_decay=bn_decay)

    with tf.variable_scope('transform_net2') as sc:
        feature_transform = feature_transform_net(net, is_training, bn_decay, K=64)
    feature_transform = tf.matmul(tf.squeeze(net, axis=[2]), feature_transform)

    # Serial structure
    # Dynamic Graph CNN for feature space
    with tf.variable_scope('DGfeature') as sc:
        adj_matrix = tf_util.pairwise_distance(feature_transform)
        nn_idx = tf_util.knn(adj_matrix, k=k)
        edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)

        net = tf_util.conv2d(edge_feature, 64, [1, 1],
                             padding='VALID', stride=[1, 1],
                             bn=True, is_training=is_training,
                             scope='dgmlp1', bn_decay=bn_decay)
        net = tf_util.conv2d(net, 64, [1, 1],
                             padding='VALID', stride=[1, 1],
                             bn=True, is_training=is_training,
                             scope='dgmlp2', bn_decay=bn_decay)
        net = tf.reduce_max(net, axis=-2, keep_dims=True)

    # Spatial Neighborhood fusion for cartesian space
    with tf.variable_scope('SNfeature') as sc:
        adj_matrix = tf_util.pairwise_distance(point_cloud)
        nn_idx = tf_util.knn(adj_matrix, k=k)

        idx_ = tf.range(batch_num_queries*num_pointclouds_per_query) * num_points
        idx_ = tf.reshape(idx_, [batch_num_queries*num_pointclouds_per_query, 1, 1])

        feature_cloud = tf.reshape(net, [-1, 64])
        edge_feature = tf.gather(feature_cloud, nn_idx+idx_)

        net = tf_util.conv2d(edge_feature, 64, [1, 1],
                             padding='VALID', stride=[1, 1],
                             bn=True, is_training=is_training,
                             scope='snmlp1', bn_decay=bn_decay)
        net = tf_util.conv2d(net, 64, [1, 1],
                             padding='VALID', stride=[1, 1],
                             bn=True, is_training=is_training,
                             scope='snmlp2', bn_decay=bn_decay)
        net = tf.reduce_max(net, axis=-2, keep_dims=True)

    # MLP for fusion
    net = tf_util.conv2d(net, 64, [1, 1],
                         padding='VALID', stride=[1,1],
                         is_training=is_training,
                         scope='conv3', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1, 1],
                         padding='VALID', stride=[1,1],
                         is_training=is_training,
                         scope='conv4', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 1024, [1, 1],
                         padding='VALID', stride=[1,1],
                         is_training=is_training,
                         scope='conv5', bn_decay=bn_decay)
    point_wise_feature = net

    NetVLAD = lp.NetVLAD(feature_size=1024, max_samples=num_points, cluster_size=CLUSTER_SIZE, 
                    output_dim=OUTPUT_DIM, gating=True, add_batch_norm=True,
                    is_training=is_training)

    net = tf.reshape(net, [-1, 1024])
    net = tf.nn.l2_normalize(net, 1)
    output = NetVLAD.forward(net)
    print(output)

    # normalize the output descriptor to unit length
    output = tf.nn.l2_normalize(output, 1)
    output = tf.reshape(output, [batch_num_queries, num_pointclouds_per_query, OUTPUT_DIM])

    return output
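
A shape-oriented usage sketch for forward above, assuming tf_util, the transform nets, and the NetVLAD module imported as lp are available, and that the function lives in a module called lpdnet_model (a hypothetical name). The input follows the docstring layout and the output is one L2-normalized 256-D descriptor per point cloud.

import tensorflow as tf

from lpdnet_model import forward  # hypothetical module holding the snippet above

BATCH_QUERIES, PCS_PER_QUERY, NUM_POINTS, NUM_FEATS = 2, 3, 4096, 13

with tf.Graph().as_default():
    queries_pl = tf.placeholder(
        tf.float32,
        shape=(BATCH_QUERIES, PCS_PER_QUERY, NUM_POINTS, NUM_FEATS))
    is_training_pl = tf.placeholder(tf.bool, shape=())

    # Each point cloud is split into xyz (3) + handcrafted features (10) inside
    # forward(); the result is batch_num_queries x num_pointclouds_per_query x 256.
    descriptors = forward(queries_pl, is_training_pl, bn_decay=None)
    print(descriptors.shape)  # (2, 3, 256)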
Ejemplo n.º 19
0
def model(point_cloud, is_training, cut, num_point, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    k = 20

    adj_matrix = tf_util.pairwise_distance(point_cloud)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(point_cloud, nn_idx=nn_idx, k=k)

    with tf.variable_scope('transform_net1') as sc:
        transform = input_transform_net(edge_feature,
                                        is_training,
                                        cut,
                                        bn_decay,
                                        K=3)

    point_cloud_transformed = tf.matmul(point_cloud, transform)
    adj_matrix = tf_util.pairwise_distance(point_cloud_transformed)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(point_cloud_transformed,
                                            nn_idx=nn_idx,
                                            k=k)

    net = tf_util.conv2d(edge_feature,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope=cut + 'dgcnn1',
                         bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net1 = net

    adj_matrix = tf_util.pairwise_distance(net)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)

    net = tf_util.conv2d(edge_feature,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope=cut + 'dgcnn2',
                         bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net2 = net

    adj_matrix = tf_util.pairwise_distance(net)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)

    net = tf_util.conv2d(edge_feature,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope=cut + 'dgcnn3',
                         bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net3 = net

    adj_matrix = tf_util.pairwise_distance(net)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)

    net = tf_util.conv2d(edge_feature,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope=cut + 'dgcnn4',
                         bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net4 = net

    net = tf_util.conv2d(tf.concat([net1, net2, net3, net4], axis=-1),
                         256, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope=cut + 'agg',
                         bn_decay=bn_decay)

    net = tf.reduce_max(net, axis=1, keep_dims=True)

    return net
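
Unlike the classification variants on this page, model above stops after the aggregation layer and returns a pooled feature rather than logits; the cut argument is prepended to the conv scopes so the block can be instantiated under a distinct name. A small sketch, assuming the snippet sits in a hypothetical module dgcnn_feat:

import tensorflow as tf

from dgcnn_feat import model  # hypothetical module holding the snippet above

BATCH_SIZE, NUM_POINT = 4, 1024

with tf.Graph().as_default():
    points_pl = tf.placeholder(tf.float32, shape=(BATCH_SIZE, NUM_POINT, 3))
    is_training_pl = tf.placeholder(tf.bool, shape=())

    # 'cls_' prefixes the dgcnn1..dgcnn4 and agg scopes; the result is the
    # max-pooled global feature of shape B x 1 x 1 x 256.
    global_feat = model(points_pl, is_training_pl, 'cls_', NUM_POINT)
    print(global_feat.shape)  # (4, 1, 1, 256)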
Ejemplo n.º 20
0
def get_model(point_cloud, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    k = 20  # number of nearest neighbors

    # Compute edge features and point features
    adj_matrix = tf_util.pairwise_distance(point_cloud)  # pairwise point distances, B*N*N
    nn_idx = tf_util.knn(adj_matrix, k=k)  # B*N*20
    edge_feature = tf_util.get_edge_feature(point_cloud, nn_idx=nn_idx,
                                            k=k)  # inputs B*N*3 and B*N*20, output B*N*k*6

    with tf.variable_scope('transform_net1') as sc:
        transform = input_transform_net(edge_feature,
                                        is_training,
                                        bn_decay,
                                        K=3)  # B*3*3

    point_cloud_transformed = tf.matmul(point_cloud, transform)  # align the point cloud

    # dgcnn1
    adj_matrix = tf_util.pairwise_distance(
        point_cloud_transformed
    )  # input: batch_size, num_points, num_dims; output: 8*1024*1024
    nn_idx = tf_util.knn(adj_matrix, k=k)  # B*N*20
    edge_feature = tf_util.get_edge_feature(point_cloud_transformed,
                                            nn_idx=nn_idx,
                                            k=k)  # B*N*20*6=B*N*K*6

    # Feature extraction: 1*1 kernel, 64 output channels
    net = tf_util.conv2d(
        edge_feature,
        64,
        [1, 1],  # B*N*K*64
        padding='VALID',
        stride=[1, 1],
        bn=True,
        is_training=is_training,
        scope='dgcnn1',
        bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)  # max over neighbors, B*N*1*64
    net1 = net

    # dgcnn2
    adj_matrix = tf_util.pairwise_distance(net)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx,
                                            k=k)  # B*N*K*128

    net = tf_util.conv2d(edge_feature,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='dgcnn2',
                         bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net2 = net

    # dgcnn3
    adj_matrix = tf_util.pairwise_distance(net)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)

    net = tf_util.conv2d(edge_feature,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='dgcnn3',
                         bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net3 = net

    # dgcnn4
    adj_matrix = tf_util.pairwise_distance(net)  # B*N*N
    nn_idx = tf_util.knn(adj_matrix, k=k)  # B*N*20
    edge_feature = tf_util.get_edge_feature(
        net, nn_idx=nn_idx, k=k)  # input: net is B*N*1*64, output: 8*1024*20*128

    net = tf_util.conv2d(
        edge_feature,
        128,
        [1, 1],  # 8*1024*20*128
        padding='VALID',
        stride=[1, 1],
        bn=True,
        is_training=is_training,
        scope='dgcnn4',
        bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)  # 8*1024*1*128
    net4 = net

    net = tf_util.conv2d(
        tf.concat([net1, net2, net3, net4], axis=-1),
        1024,
        [1, 1],  # 1024 output channels, 1*1 kernel
        padding='VALID',
        stride=[1, 1],
        bn=True,
        is_training=is_training,
        scope='agg',
        bn_decay=bn_decay)  # 8*1024*1*1024

    net = tf.reduce_max(net, axis=1, keep_dims=True)  # 8*1*1*1024

    # MLP on global point cloud vector
    net = tf.reshape(net, [batch_size, -1])  #  8*1024
    net = tf_util.fully_connected(net,
                                  512,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc1',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.5,
                          is_training=is_training,
                          scope='dp1')
    net = tf_util.fully_connected(net,
                                  256,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc2',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.5,
                          is_training=is_training,
                          scope='dp2')
    net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')

    return net, end_points
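
The shape comments above describe the tf_util helpers only through their inputs and outputs. For reference, here is a minimal, self-contained sketch of how such a pairwise-distance/kNN pair is commonly written in TensorFlow 1.x; it illustrates the B*N*N distance matrix and B*N*k index tensor and is not claimed to be tf_util's exact implementation.

import tensorflow as tf

def pairwise_distance_sketch(point_cloud):
    """point_cloud: B x N x C -> squared Euclidean distances, B x N x N."""
    inner = -2 * tf.matmul(point_cloud, point_cloud, transpose_b=True)       # B x N x N
    square = tf.reduce_sum(tf.square(point_cloud), axis=-1, keep_dims=True)  # B x N x 1
    # ||xi - xj||^2 = ||xi||^2 - 2<xi, xj> + ||xj||^2
    return square + inner + tf.transpose(square, perm=[0, 2, 1])

def knn_sketch(adj_matrix, k=20):
    """adj_matrix: B x N x N squared distances -> B x N x k neighbor indices."""
    _, nn_idx = tf.nn.top_k(-adj_matrix, k=k)  # smallest distances = largest negatives
    return nn_idx

# Usage: nn_idx = knn_sketch(pairwise_distance_sketch(points), k=20)  # B x N x 20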
Ejemplo n.º 21
0
def get_model(point_cloud,
              is_training,
              num_class,
              global_pl,
              weight_decay=None,
              bn_decay=None,
              scname=''):
    ''' input: BxNxF
    output: BxNx(cats*segms) '''
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    num_feat = point_cloud.get_shape()[2].value

    k = 10
    adj = tf_util.pairwise_distanceR(point_cloud[:, :, :3])
    n_heads = 1
    nn_idx = tf_util.knn(adj, k=k)

    net, locals_transform, coefs = gap_block(k, n_heads, nn_idx, point_cloud,
                                             point_cloud, ('filter0', 16),
                                             bn_decay, weight_decay,
                                             is_training, scname)

    net = tf_util.conv2d(net,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         activation_fn=tf.nn.relu,
                         bn=True,
                         is_training=is_training,
                         scope='gapnet01' + scname,
                         bn_decay=bn_decay)
    net01 = net

    net = tf_util.conv2d(net,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         activation_fn=tf.nn.relu,
                         bn=True,
                         is_training=is_training,
                         scope='gapnet02' + scname,
                         bn_decay=bn_decay)

    net02 = net
    adj_matrix = tf_util.pairwise_distance(net)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    adj_conv = nn_idx
    n_heads = 1

    net, locals_transform1, coefs2 = gap_block(k, n_heads, nn_idx, net,
                                               point_cloud, ('filter1', 128),
                                               bn_decay, weight_decay,
                                               is_training, scname)

    net = tf_util.conv2d(net,
                         256, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         activation_fn=tf.nn.relu,
                         bn=True,
                         is_training=is_training,
                         scope='gapnet11' + scname,
                         bn_decay=bn_decay)
    net11 = net

    net = tf_util.conv2d(net,
                         256, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         activation_fn=tf.nn.relu,
                         bn=True,
                         is_training=is_training,
                         scope='gapnet12' + scname,
                         bn_decay=bn_decay)

    net12 = net

    global_expand = tf.reshape(global_pl, [batch_size, 1, 1, -1])
    global_expand = tf.tile(global_expand, [1, num_point, 1, 1])
    global_expand = tf_util.conv2d(global_expand,
                                   16, [1, 1],
                                   padding='VALID',
                                   stride=[1, 1],
                                   bn=True,
                                   is_training=is_training,
                                   scope='global_expand' + scname,
                                   bn_decay=bn_decay)

    net = tf.concat([
        net01, net02, net11, net12, global_expand, locals_transform,
        locals_transform1
    ],
                    axis=-1)

    net = tf_util.conv2d(net,
                         2, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         activation_fn=tf.nn.relu,
                         bn=True,
                         is_training=is_training,
                         scope='agg' + scname,
                         bn_decay=bn_decay)

    net = tf_util.max_pool2d(net, [num_point, 1],
                             padding='VALID',
                             scope='avgpool' + scname)
    max_pool = net
    expand = tf.tile(net, [1, num_point, 1, 1])
    net = tf.concat(axis=3, values=[
        expand,
        net01,
        net11,
    ])
    net = tf_util.conv2d(net,
                         256, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn_decay=bn_decay,
                         bn=True,
                         is_training=is_training,
                         scope='seg/conv2',
                         weight_decay=weight_decay,
                         is_dist=True)
    net = tf_util.conv2d(net,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn_decay=bn_decay,
                         bn=True,
                         is_training=is_training,
                         scope='seg/conv4',
                         weight_decay=weight_decay,
                         is_dist=True)

    net = tf_util.conv2d(net,
                         num_class, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         activation_fn=None,
                         bn=False,
                         scope='seg/conv5',
                         weight_decay=weight_decay,
                         is_dist=True)

    net = tf.reshape(net, [batch_size, num_point, num_class])

    return net, max_pool
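
A usage sketch for the segmentation model above. It assumes gap_block and tf_util are importable, that the snippet lives in a hypothetical module gapnet_seg, and that global_pl carries one global feature vector per sample, which the network tiles onto every point before the final per-point classifier.

import tensorflow as tf

from gapnet_seg import get_model  # hypothetical module holding the snippet above

BATCH_SIZE, NUM_POINT, NUM_FEAT = 4, 512, 7
NUM_GLOBAL, NUM_CLASS = 5, 3

with tf.Graph().as_default():
    points_pl = tf.placeholder(tf.float32, shape=(BATCH_SIZE, NUM_POINT, NUM_FEAT))
    global_pl = tf.placeholder(tf.float32, shape=(BATCH_SIZE, NUM_GLOBAL))
    is_training_pl = tf.placeholder(tf.bool, shape=())

    # logits: per-point class scores, B x N x num_class; max_pool: pooled feature.
    logits, max_pool = get_model(points_pl, is_training_pl, NUM_CLASS, global_pl,
                                 weight_decay=0.0, bn_decay=None, scname='seg')
    print(logits.shape)  # (4, 512, 3)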
Ejemplo n.º 22
0
def get_model(point_cloud, is_training, bn_decay=None):
  """ ConvNet baseline, input is BxNx9 gray image """
  batch_size = point_cloud.get_shape()[0].value
  num_point = point_cloud.get_shape()[1].value
  input_image = tf.expand_dims(point_cloud, -1)

  k = 30

  adj = tf_util.pairwise_distance(point_cloud[:, :, 6:])
  nn_idx = tf_util.knn(adj, k=k) # (batch, num_points, k)
  edge_feature = tf_util.get_edge_feature(input_image, nn_idx=nn_idx, k=k)

  samp_out1, samp_out2, samp_out3, globle_feat = model_part(edge_feature, is_training, k, bn_decay)

  out1 = tf_util.conv2d(edge_feature, 64, [1,1],
                       padding='VALID', stride=[1,1],
                       bn=True, is_training=is_training,
                       scope='adj_conv1', bn_decay=bn_decay, is_dist=True)

  out2 = tf_util.conv2d(out1, 64, [1,1],
                       padding='VALID', stride=[1,1],
                       bn=True, is_training=is_training,
                       scope='adj_conv2', bn_decay=bn_decay, is_dist=True)

  net_max_1 = tf.reduce_max(out2, axis=-2, keep_dims=True)
  net_mean_1 = tf.reduce_mean(out2, axis=-2, keep_dims=True)

  out3 = tf_util.conv2d(tf.concat([net_max_1, net_mean_1], axis=-1), 64, [1,1],
                       padding='VALID', stride=[1,1],
                       bn=True, is_training=is_training,
                       scope='adj_conv3', bn_decay=bn_decay, is_dist=True)

  out1_expand = tf.tile(tf.reshape(samp_out1, [batch_size, 1, 1, -1]), [1, num_point, 1, 1])
  out1_concat = tf.concat(axis=3, values=[out3, out1_expand])
  print("out1_concat = ", out1_concat.shape)

  out4 = tf_util.conv2d(out1_concat, 64, [1,1],
                       padding='VALID', stride=[1,1],
                       bn=True, is_training=is_training,
                       scope='adj_conv4', bn_decay=bn_decay, is_dist=True)

  adj = tf_util.pairwise_distance(tf.squeeze(out4, axis=-2))
  nn_idx = tf_util.knn(adj, k=k)
  edge_feature = tf_util.get_edge_feature(out4, nn_idx=nn_idx, k=k)

  out5 = tf_util.conv2d(edge_feature, 64, [1,1],
                       padding='VALID', stride=[1,1],
                       bn=True, is_training=is_training,
                       scope='adj_conv5', bn_decay=bn_decay, is_dist=True)

  net_max_2 = tf.reduce_max(out5, axis=-2, keep_dims=True)
  net_mean_2 = tf.reduce_mean(out5, axis=-2, keep_dims=True)

  out6 = tf_util.conv2d(tf.concat([net_max_2, net_mean_2], axis=-1), 64, [1,1],
                       padding='VALID', stride=[1,1],
                       bn=True, is_training=is_training,
                       scope='adj_conv6', bn_decay=bn_decay, is_dist=True)

  out2_expand = tf.tile(tf.reshape(samp_out2, [batch_size, 1, 1, -1]), [1, num_point, 1, 1])
  out2_concat = tf.concat(axis=3, values=[out6, out2_expand])
  out7 = tf_util.conv2d(out2_concat, 64, [1,1],
                       padding='VALID', stride=[1,1],
                       bn=True, is_training=is_training,
                       scope='adj_conv7', bn_decay=bn_decay, is_dist=True)

  adj = tf_util.pairwise_distance(tf.squeeze(out7, axis=-2))
  nn_idx = tf_util.knn(adj, k=k)
  edge_feature = tf_util.get_edge_feature(out7, nn_idx=nn_idx, k=k)

  out8 = tf_util.conv2d(edge_feature, 64, [1,1],
                       padding='VALID', stride=[1,1],
                       bn=True, is_training=is_training,
                       scope='adj_conv8', bn_decay=bn_decay, is_dist=True)

  net_max_3 = tf.reduce_max(out8, axis=-2, keep_dims=True)
  net_mean_3 = tf.reduce_mean(out8, axis=-2, keep_dims=True)

  out9 = tf_util.conv2d(tf.concat([net_max_3, net_mean_3], axis=-1), 64, [1,1],
                       padding='VALID', stride=[1,1],
                       bn=True, is_training=is_training,
                       scope='adj_conv9', bn_decay=bn_decay, is_dist=True)
  out3_expand = tf.tile(tf.reshape(samp_out3, [batch_size, 1, 1, -1]), [1, num_point, 1, 1])
  out3_concat = tf.concat(axis=3, values=[out9, out3_expand])
  out10 = tf_util.conv2d(out3_concat, 64, [1,1],
                       padding='VALID', stride=[1,1],
                       bn=True, is_training=is_training,
                       scope='adj_conv10', bn_decay=bn_decay, is_dist=True)

  out11 = tf_util.conv2d(tf.concat([out4, out7, out10], axis=-1), 1024, [1, 1],
                       padding='VALID', stride=[1,1],
                       bn=True, is_training=is_training,
                       scope='adj_conv11', bn_decay=bn_decay, is_dist=True)

  out_max = tf_util.max_pool2d(out11, [num_point,1], padding='VALID', scope='maxpool')

  expand = tf.tile(out_max, [1, num_point, 1, 1])

  concat = tf.concat(axis=3, values=[expand,
                                     net_max_1,
                                     net_mean_1,
                                     out4,
                                     net_max_2,
                                     net_mean_2,
                                     out7,
                                     net_max_3,
                                     net_mean_3,
                                     out10,
                                     out11])
  print("concat = ", concat.shape)
  # CONCAT
  globle_feat_expand = tf.tile(tf.reshape(globle_feat, [batch_size, 1, 1, -1]), [1, num_point, 1, 1])
  points_feat1_concat = tf.concat(axis=3, values=[concat, globle_feat_expand])
  print("points_feat1_concat = ", points_feat1_concat.shape)

  # CONV
  net = tf_util.conv2d(points_feat1_concat, 512, [1,1], padding='VALID', stride=[1,1],
             bn=True, is_training=is_training, scope='seg/conv1', is_dist=True)
  net = tf_util.conv2d(net, 256, [1,1], padding='VALID', stride=[1,1],
             bn=True, is_training=is_training, scope='seg/conv2', is_dist=True)
  net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training, scope='dp1')
  net = tf_util.conv2d(net, 13, [1,1], padding='VALID', stride=[1,1],
             activation_fn=None, scope='seg/conv3', is_dist=True)
  net = tf.squeeze(net, [2])

  return net
Ejemplo n.º 23
0
def get_model(input_tensor, is_training, bn_decay=None):
    weight_decay = 0.0
    num_point = input_tensor.get_shape()[1].value
    
    k = 40


    # Edge features for the input transform net
    adj_matrix = tf_util.pairwise_distance(input_tensor)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(input_tensor, nn_idx=nn_idx, k=k)

    # Transform net
    with tf.variable_scope('transform_net1') as sc:
        transform = input_transform_net(edge_feature, is_training, bn_decay, K=input_tensor.get_shape()[2], is_dist=True)
    input_tensor_transformed = tf.matmul(input_tensor, transform)
    adj_matrix = tf_util.pairwise_distance(input_tensor_transformed)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(input_tensor_transformed, nn_idx=nn_idx, k=k)

    out1_1 = tf_util.conv2d(edge_feature, 64, [1,1],
                       padding='VALID', stride=[1,1],
                       bn=True, is_training=is_training, weight_decay=weight_decay,
                       scope='one/adj_conv1', bn_decay=bn_decay, is_dist=True)

    
    out1_2 = tf_util.conv2d(out1_1, 64, [1,1],
                        padding='VALID', stride=[1,1],
                        bn=True, is_training=is_training, weight_decay=weight_decay,
                        scope='one/adj_conv2', bn_decay=bn_decay, is_dist=True)

        
    out1_3 = tf_util.conv2d(out1_2, 64, [1,1],
                        padding='VALID', stride=[1,1],
                        bn=True, is_training=is_training, weight_decay=weight_decay,
                        scope='one/adj_conv3', bn_decay=bn_decay, is_dist=True)

    net_1 = tf.reduce_max(out1_3, axis=-2, keepdims=True)



    adj = tf_util.pairwise_distance(net_1)
    nn_idx = tf_util.knn(adj, k=k)
    edge_feature = tf_util.get_edge_feature(net_1, nn_idx=nn_idx, k=k)

    out2_1 = tf_util.conv2d(edge_feature, 64, [1,1],
                        padding='VALID', stride=[1,1],
                        bn=True, is_training=is_training, weight_decay=weight_decay,
                        scope='two/adj_conv1', bn_decay=bn_decay, is_dist=True)

    out2_2 = tf_util.conv2d(out2_1, 64, [1,1],
                        padding='VALID', stride=[1,1],
                        bn=True, is_training=is_training, weight_decay=weight_decay,
                        scope='two/adj_conv2', bn_decay=bn_decay, is_dist=True)

    out2_3 = tf_util.conv2d(out2_2, 64, [1,1],
                            padding='VALID', stride=[1,1],
                            bn=True, is_training=is_training, weight_decay=weight_decay,
                            scope='two/adj_conv3', bn_decay=bn_decay, is_dist=True)
                            
    net_2 = tf.reduce_max(out2_3, axis=-2, keepdims=True)

      

    adj = tf_util.pairwise_distance(net_2)
    nn_idx = tf_util.knn(adj, k=k)
    edge_feature = tf_util.get_edge_feature(net_2, nn_idx=nn_idx, k=k)

    out3_1 = tf_util.conv2d(edge_feature, 64, [1,1],
                        padding='VALID', stride=[1,1],
                        bn=True, is_training=is_training, weight_decay=weight_decay,
                        scope='three/adj_conv1', bn_decay=bn_decay, is_dist=True)


    out3_2 = tf_util.conv2d(out3_1, 64, [1,1],
                        padding='VALID', stride=[1,1],
                        bn=True, is_training=is_training, weight_decay=weight_decay,
                        scope='three/adj_conv2', bn_decay=bn_decay, is_dist=True)


    net_3 = tf.reduce_max(out3_2, axis=-2, keepdims=True)



    out7 = tf_util.conv2d(tf.concat([net_1, net_2, net_3], axis=-1), 1024, [1, 1], 
                        padding='VALID', stride=[1,1],
                        bn=True, is_training=is_training,
                        scope='adj_conv7', bn_decay=bn_decay, is_dist=True)

    out_max = tf_util.max_pool2d(out7, [num_point, 1], padding='VALID', scope='maxpool')


    expand = tf.tile(out_max, [1, num_point, 1, 1])

    concat = tf.concat(axis=3, values=[expand, 
                                        net_1,
                                        net_2,
                                        net_3])

    # CONV 
    net = tf_util.conv2d(concat, 512, [1,1], padding='VALID', stride=[1,1],
                bn=True, is_training=is_training, scope='seg/conv1', is_dist=True)
    # net = tf_util.conv2d(net, 256, [1,1], padding='VALID', stride=[1,1],
    #             bn=True, is_training=is_training, scope='seg/conv2', is_dist=True)
    # net = tf_util.conv2d(net, 128, [1,1], padding='VALID', stride=[1,1],
    #             bn=True, is_training=is_training, scope='seg/conv3', is_dist=True)
    # net = tf_util.conv2d(net, 64, [1,1], padding='VALID', stride=[1,1],
    #             bn=True, is_training=is_training, scope='seg/conv4', is_dist=True)
    # net = tf_util.conv2d(net, 32, [1,1], padding='VALID', stride=[1,1],
    #             bn=True, is_training=is_training, scope='seg/conv5', is_dist=True)    
    
    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp1')
    
    net = tf_util.conv2d(net, 16, [1,1], padding='VALID', stride=[1,1],
                activation_fn=None, scope='seg/output', is_dist=True)


    net = tf.squeeze(net, [2])


    return net
Ejemplo n.º 24
0
def get_model(point_cloud, is_training, bn_decay, k=20):
    """ Classification DGCNN, input is BxNx3, output Bx40 """

    # EdgeConv functions (MLP implemented as conv2d)
    adj_matrix = tf_util.pairwise_distance(point_cloud)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(point_cloud, nn_idx=nn_idx, k=k)

    net = tf_util.conv2d(edge_feature, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='dgcnn1', bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keepdims=True)
    net1 = net

    adj_matrix = tf_util.pairwise_distance(net)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)

    net = tf_util.conv2d(edge_feature, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='dgcnn2', bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keepdims=True)
    net2 = net
  
    adj_matrix = tf_util.pairwise_distance(net)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)  

    net = tf_util.conv2d(edge_feature, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='dgcnn3', bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keepdims=True)
    net3 = net

    adj_matrix = tf_util.pairwise_distance(net)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)  
    
    net = tf_util.conv2d(edge_feature, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='dgcnn4', bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keepdims=True)
    net4 = net

    net = tf_util.conv2d(tf.concat([net1, net2, net3, net4], axis=-1), 1024, [1, 1], 
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='agg', bn_decay=bn_decay)
    net = tf.squeeze(net, -2)

    # Symmetric function: max pooling
    net = tf.reduce_max(net, axis=1, name='maxpool')

    # MLP on global point cloud vector
    net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training,
                                  scope='fc1', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp1')
    net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training,
                                  scope='fc2', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp2')
    net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')

    return net
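
The comment at the top of this snippet notes that each EdgeConv is just an MLP (a 1x1 conv2d) applied to edge features. As a reference for what those edge features contain, the following is a minimal sketch of the usual DGCNN construction, concatenating each center point's feature with (neighbor - center) for its k nearest neighbors. It mirrors the common open-source pattern but is only an illustration; the snippets on this page rely on tf_util.get_edge_feature.

import tensorflow as tf

def get_edge_feature_sketch(point_cloud, nn_idx, k=20):
    """point_cloud: B x N x C (or B x N x 1 x C); nn_idx: B x N x k.
    Returns B x N x k x 2C edge features: concat(center, neighbor - center)."""
    point_cloud = tf.squeeze(point_cloud)
    if point_cloud.get_shape().ndims == 2:  # batch of one collapsed by squeeze
        point_cloud = tf.expand_dims(point_cloud, 0)

    batch_size = point_cloud.get_shape()[0].value
    num_points = point_cloud.get_shape()[1].value
    num_dims = point_cloud.get_shape()[2].value

    # Offset per-cloud neighbor indices so they address one flattened (B*N) x C table.
    idx_base = tf.reshape(tf.range(batch_size) * num_points, [batch_size, 1, 1])
    flat = tf.reshape(point_cloud, [-1, num_dims])

    neighbors = tf.gather(flat, nn_idx + idx_base)   # B x N x k x C
    centers = tf.expand_dims(point_cloud, axis=-2)   # B x N x 1 x C
    centers = tf.tile(centers, [1, 1, k, 1])         # B x N x k x C
    return tf.concat([centers, neighbors - centers], axis=-1)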
Ejemplo n.º 25
0
def get_model(point_cloud, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    k = 20

    adj_matrix = tf_util.pairwise_distance(point_cloud)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(point_cloud, nn_idx=nn_idx, k=k)

    with tf.variable_scope('transform_net1') as sc:
        transform = input_transform_net(edge_feature,
                                        is_training,
                                        bn_decay,
                                        K=3)

    point_cloud_transformed = tf.matmul(point_cloud, transform)
    input_image = tf.expand_dims(point_cloud_transformed, -1)

    adj_matrix = tf_util.pairwise_distance(point_cloud_transformed)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(point_cloud_transformed,
                                            nn_idx=nn_idx,
                                            k=k)

    # addition of transform layers
    net = tf_util.conv2d(input_image,
                         64, [1, 3],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv1',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv2',
                         bn_decay=bn_decay)

    with tf.variable_scope('transform_net2') as sc:
        transform = feature_transform_net(net, is_training, bn_decay, K=64)
        end_points['transform'] = transform
        net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), transform)
        net_transformed = tf.expand_dims(net_transformed, [2])

    net = tf_util.conv2d(edge_feature,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='dftnet1',
                         bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net1 = net

    adj_matrix = tf_util.pairwise_distance(net)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)

    net = tf_util.conv2d(edge_feature,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='dftnet2',
                         bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net2 = net

    adj_matrix = tf_util.pairwise_distance(net)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)

    net = tf_util.conv2d(edge_feature,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='dftnet3',
                         bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net3 = net

    adj_matrix = tf_util.pairwise_distance(net)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)

    net = tf_util.conv2d(edge_feature,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='dftnet4',
                         bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net4 = net

    adj_matrix = tf_util.pairwise_distance(net)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)

    net = tf_util.conv2d(edge_feature,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='dftnet5',
                         bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net5 = net

    adj_matrix = tf_util.pairwise_distance(net)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)

    net = tf_util.conv2d(edge_feature,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='dftnet6',
                         bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net6 = net

    adj_matrix = tf_util.pairwise_distance(net)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)

    net = tf_util.conv2d(edge_feature,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='dftnet7',
                         bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net7 = net

    adj_matrix = tf_util.pairwise_distance(net)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)

    net = tf_util.conv2d(edge_feature,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='dftnet8',
                         bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net8 = net

    net = tf_util.conv2d(tf.concat(
        [net1, net2, net3, net4, net5, net6, net7, net8], axis=-1),
                         1024, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='agg',
                         bn_decay=bn_decay)

    net = tf.reduce_max(net, axis=1, keep_dims=True)

    net = tf.reshape(net, [batch_size, -1])
    net = tf_util.fully_connected(net,
                                  512,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc1',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.5,
                          is_training=is_training,
                          scope='dp1')

    net = tf_util.fully_connected(net,
                                  256,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc2',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.5,
                          is_training=is_training,
                          scope='dp2')

    net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')

    return net, end_points
Ejemplo n.º 26
0
def get_model(point_cloud, is_training, num_classes, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = point_cloud.get_shape()[0].value
    # num_point = point_cloud.get_shape()[1].value
    end_points = {}
    k = 20

    # pairwise distance of the points in the point cloud
    adj_matrix = tf_util.pairwise_distance(point_cloud)

    # get indices of k nearest neighbors
    nn_idx = tf_util.knn(adj_matrix, k=k)

    # edge feature
    edge_feature = tf_util.get_edge_feature(point_cloud, nn_idx=nn_idx, k=k)

    # transform net 1
    with tf.variable_scope('transform_net1') as _:
        transform = input_transform_net(edge_feature,
                                        is_training,
                                        bn_decay,
                                        K=3)

    # point cloud transf
    point_cloud_transformed = tf.matmul(point_cloud, transform)

    # pairwise distance of the points in the point cloud
    adj_matrix = tf_util.pairwise_distance(point_cloud_transformed)

    # get indices of k nearest neighbors
    nn_idx = tf_util.knn(adj_matrix, k=k)

    # neighbor indices and the subregion index (0-7) are now available
    edge_feature = tf_util.get_edge_feature(point_cloud_transformed,
                                            nn_idx=nn_idx,
                                            k=k)

    #  net = tf_util.conv2d_reg(point_cloud_transformed, nn_idx,
    net = tf_util.conv2d(edge_feature,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='dgcnn1',
                         bn_decay=bn_decay)

    # Maxpool
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net_1 = net

    #############################################################################
    # 2nd block
    #############################################################################

    adj_matrix = tf_util.pairwise_distance(net)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)

    net = tf_util.conv2d(edge_feature,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='dgcnn2',
                         bn_decay=bn_decay)
    # net = tf.reduce_max(net, axis=-2, keep_dims=False)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net_2 = net

    #############################################################################
    # 3rd block
    #############################################################################

    adj_matrix = tf_util.pairwise_distance(net)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)

    net = tf_util.conv2d(edge_feature,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='dgcnn3',
                         bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net_3 = net

    #############################################################################
    # 4th block
    #############################################################################

    adj_matrix = tf_util.pairwise_distance(net)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)

    net = tf_util.conv2d(edge_feature,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='dgcnn4',
                         bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net_4 = net

    #############################################################################
    # aggregate block
    #############################################################################

    net = tf.concat([net_1, net_2, net_3, net_4], axis=-1)
    net = tf_util.conv2d(net,
                         1024, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='agg',
                         bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=1, keep_dims=True)

    # MLP on global point cloud vector
    net = tf.reshape(net, [batch_size, -1])
    net = tf_util.fully_connected(net,
                                  512,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc1',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.5,
                          is_training=is_training,
                          scope='dp1')
    net = tf_util.fully_connected(net,
                                  256,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc2',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.5,
                          is_training=is_training,
                          scope='dp2')
    net = tf_util.fully_connected(net,
                                  num_classes,
                                  activation_fn=None,
                                  scope='fc3')

    return net, end_points
Ejemplo n.º 27
0
def get_model(point_cloud, input_label, is_training, cat_num, part_num, \
    batch_size, num_point, weight_decay, bn_decay=None):

    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    input_image = tf.expand_dims(point_cloud, -1)

    k = 20

    adj = tf_util.pairwise_distance(point_cloud)
    nn_idx = tf_util.knn(adj, k=k)
    edge_feature = tf_util.get_edge_feature(input_image, nn_idx=nn_idx, k=k)

    with tf.variable_scope('transform_net1') as sc:
        transform = input_transform_net(edge_feature,
                                        is_training,
                                        bn_decay,
                                        K=3,
                                        is_dist=True)
    point_cloud_transformed = tf.matmul(point_cloud, transform)

    input_image = tf.expand_dims(point_cloud_transformed, -1)
    adj = tf_util.pairwise_distance(point_cloud_transformed)
    nn_idx = tf_util.knn(adj, k=k)
    edge_feature = tf_util.get_edge_feature(input_image, nn_idx=nn_idx, k=k)

    out1 = tf_util.conv2d(edge_feature,
                          64, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn=True,
                          is_training=is_training,
                          weight_decay=weight_decay,
                          scope='adj_conv1',
                          bn_decay=bn_decay,
                          is_dist=True)

    out2 = tf_util.conv2d(out1,
                          64, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn=True,
                          is_training=is_training,
                          weight_decay=weight_decay,
                          scope='adj_conv2',
                          bn_decay=bn_decay,
                          is_dist=True)

    net_1 = tf.reduce_max(out2, axis=-2, keep_dims=True)

    adj = tf_util.pairwise_distance(net_1)
    nn_idx = tf_util.knn(adj, k=k)
    edge_feature = tf_util.get_edge_feature(net_1, nn_idx=nn_idx, k=k)

    out3 = tf_util.conv2d(edge_feature,
                          64, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn=True,
                          is_training=is_training,
                          weight_decay=weight_decay,
                          scope='adj_conv3',
                          bn_decay=bn_decay,
                          is_dist=True)

    out4 = tf_util.conv2d(out3,
                          64, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn=True,
                          is_training=is_training,
                          weight_decay=weight_decay,
                          scope='adj_conv4',
                          bn_decay=bn_decay,
                          is_dist=True)

    net_2 = tf.reduce_max(out4, axis=-2, keep_dims=True)

    adj = tf_util.pairwise_distance(net_2)
    nn_idx = tf_util.knn(adj, k=k)
    edge_feature = tf_util.get_edge_feature(net_2, nn_idx=nn_idx, k=k)

    out5 = tf_util.conv2d(edge_feature,
                          64, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn=True,
                          is_training=is_training,
                          weight_decay=weight_decay,
                          scope='adj_conv5',
                          bn_decay=bn_decay,
                          is_dist=True)

    # out6 = tf_util.conv2d(out5, 64, [1,1],
    #                      padding='VALID', stride=[1,1],
    #                      bn=True, is_training=is_training, weight_decay=weight_decay,
    #                      scope='adj_conv6', bn_decay=bn_decay, is_dist=True)

    net_3 = tf.reduce_max(out5, axis=-2, keep_dims=True)

    out7 = tf_util.conv2d(tf.concat([net_1, net_2, net_3], axis=-1),
                          1024, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn=True,
                          is_training=is_training,
                          scope='adj_conv7',
                          bn_decay=bn_decay,
                          is_dist=True)

    out_max = tf_util.max_pool2d(out7, [num_point, 1],
                                 padding='VALID',
                                 scope='maxpool')

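    # Embed the one-hot object-category label and append it to the global feature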
    one_hot_label_expand = tf.reshape(input_label, [batch_size, 1, 1, cat_num])
    one_hot_label_expand = tf_util.conv2d(one_hot_label_expand,
                                          64, [1, 1],
                                          padding='VALID',
                                          stride=[1, 1],
                                          bn=True,
                                          is_training=is_training,
                                          scope='one_hot_label_expand',
                                          bn_decay=bn_decay,
                                          is_dist=True)
    out_max = tf.concat(axis=3, values=[out_max, one_hot_label_expand])
    expand = tf.tile(out_max, [1, num_point, 1, 1])

    concat = tf.concat(axis=3, values=[expand, net_1, net_2, net_3])

    net2 = tf_util.conv2d(concat,
                          256, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn_decay=bn_decay,
                          bn=True,
                          is_training=is_training,
                          scope='seg/conv1',
                          weight_decay=weight_decay,
                          is_dist=True)
    net2 = tf_util.dropout(net2,
                           keep_prob=0.6,
                           is_training=is_training,
                           scope='seg/dp1')
    net2 = tf_util.conv2d(net2,
                          256, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn_decay=bn_decay,
                          bn=True,
                          is_training=is_training,
                          scope='seg/conv2',
                          weight_decay=weight_decay,
                          is_dist=True)
    net2 = tf_util.dropout(net2,
                           keep_prob=0.6,
                           is_training=is_training,
                           scope='seg/dp2')
    net2 = tf_util.conv2d(net2,
                          128, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn_decay=bn_decay,
                          bn=True,
                          is_training=is_training,
                          scope='seg/conv3',
                          weight_decay=weight_decay,
                          is_dist=True)
    net2 = tf_util.conv2d(net2,
                          part_num, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          activation_fn=None,
                          bn=False,
                          scope='seg/conv4',
                          weight_decay=weight_decay,
                          is_dist=True)

    net2 = tf.reshape(net2, [batch_size, num_point, part_num])

    return net2
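
A hedged usage sketch for the part-segmentation model above, wiring it up with TF 1.x placeholders (the placeholder names and the concrete sizes are assumptions for illustration only):

import tensorflow as tf

BATCH, NUM_POINT, CAT_NUM, PART_NUM = 32, 2048, 16, 50

points = tf.placeholder(tf.float32, [BATCH, NUM_POINT, 3], name='points')
labels = tf.placeholder(tf.float32, [BATCH, CAT_NUM], name='one_hot_labels')
is_training = tf.placeholder(tf.bool, name='is_training')

# seg_logits has shape [BATCH, NUM_POINT, PART_NUM]
seg_logits = get_model(points, labels, is_training,
                       cat_num=CAT_NUM, part_num=PART_NUM,
                       batch_size=BATCH, num_point=NUM_POINT,
                       weight_decay=0.0, bn_decay=None)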
Example No. 28
def get_model(point_cloud,
              is_training,
              bn_decay=None,
              weight_decay=None,
              classes=13):
    """ ConvNet baseline, input is BxNx9 gray image """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    input_image = tf.expand_dims(point_cloud, -1)

    k = 20

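    # The k-NN graph is built from channels 6: only (in the common 9-D S3DIS layout these are the normalized XYZ coordinates)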
    adj = tf_util.pairwise_distance(point_cloud[:, :, 6:])
    nn_idx = tf_util.knn(adj, k=k)  # (batch, num_points, k)
    edge_feature = tf_util.get_edge_feature(input_image, nn_idx=nn_idx, k=k)

    out1 = tf_util.conv2d(edge_feature,
                          64, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn=True,
                          is_training=is_training,
                          weight_decay=weight_decay,
                          scope='adj_conv1',
                          bn_decay=bn_decay,
                          is_dist=True)

    out2 = tf_util.conv2d(out1,
                          64, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn=True,
                          is_training=is_training,
                          weight_decay=weight_decay,
                          scope='adj_conv2',
                          bn_decay=bn_decay,
                          is_dist=True)

    net_1 = tf.reduce_max(out2, axis=-2, keep_dims=True)

    adj = tf_util.pairwise_distance(net_1)
    nn_idx = tf_util.knn(adj, k=k)
    edge_feature = tf_util.get_edge_feature(net_1, nn_idx=nn_idx, k=k)

    out3 = tf_util.conv2d(edge_feature,
                          64, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn=True,
                          is_training=is_training,
                          weight_decay=weight_decay,
                          scope='adj_conv3',
                          bn_decay=bn_decay,
                          is_dist=True)

    out4 = tf_util.conv2d(out3,
                          64, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn=True,
                          is_training=is_training,
                          weight_decay=weight_decay,
                          scope='adj_conv4',
                          bn_decay=bn_decay,
                          is_dist=True)

    net_2 = tf.reduce_max(out4, axis=-2, keep_dims=True)

    adj = tf_util.pairwise_distance(net_2)
    nn_idx = tf_util.knn(adj, k=k)
    edge_feature = tf_util.get_edge_feature(net_2, nn_idx=nn_idx, k=k)

    out5 = tf_util.conv2d(edge_feature,
                          64, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn=True,
                          is_training=is_training,
                          weight_decay=weight_decay,
                          scope='adj_conv5',
                          bn_decay=bn_decay,
                          is_dist=True)

    # out6 = tf_util.conv2d(out5, 64, [1,1],
    #                      padding='VALID', stride=[1,1],
    #                      bn=True, is_training=is_training, weight_decay=weight_decay,
    #                      scope='adj_conv6', bn_decay=bn_decay, is_dist=True)

    net_3 = tf.reduce_max(out5, axis=-2, keep_dims=True)

    out7 = tf_util.conv2d(tf.concat([net_1, net_2, net_3], axis=-1),
                          1024, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn=True,
                          is_training=is_training,
                          scope='adj_conv7',
                          bn_decay=bn_decay,
                          is_dist=True)

    out_max = tf_util.max_pool2d(out7, [num_point, 1],
                                 padding='VALID',
                                 scope='maxpool')

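    # Tile the global descriptor to every point and fuse it with the per-block local features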
    expand = tf.tile(out_max, [1, num_point, 1, 1])

    concat = tf.concat(axis=3, values=[expand, net_1, net_2, net_3])

    # CONV
    net = tf_util.conv2d(concat,
                         512, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='seg/conv1',
                         is_dist=True)
    net = tf_util.conv2d(net,
                         256, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='seg/conv2',
                         is_dist=True)
    net = tf_util.dropout(net,
                          keep_prob=0.7,
                          is_training=is_training,
                          scope='dp1')
    net = tf_util.conv2d(net,
                         classes, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         activation_fn=None,
                         scope='seg/conv3',
                         is_dist=True)
    net = tf.squeeze(net, [2])

    return net
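
A possible way to instantiate the semantic-segmentation model above and attach a loss (placeholder names and sizes are illustrative assumptions):

import tensorflow as tf

BATCH, NUM_POINT, NUM_CLASSES = 24, 4096, 13

points = tf.placeholder(tf.float32, [BATCH, NUM_POINT, 9], name='points')
seg_labels = tf.placeholder(tf.int32, [BATCH, NUM_POINT], name='seg_labels')
is_training = tf.placeholder(tf.bool, name='is_training')

# logits has shape [BATCH, NUM_POINT, NUM_CLASSES]
logits = get_model(points, is_training, bn_decay=None,
                   weight_decay=0.0, classes=NUM_CLASSES)
loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits,
                                                   labels=seg_labels))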
Example No. 29
def get_model(point_cloud, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    k_neighbors = (10, 20, 30, 40)
    out_dims2neighbors = (8, 16, 16, 24)
    n_heads = 4
    origin_adj_matrix = tf_util.pairwise_distance(point_cloud)
    nn_idx1 = tf_util.knn(origin_adj_matrix, k=k_neighbors[0])
    nn_idx2 = tf_util.knn(origin_adj_matrix, k=k_neighbors[1])
    nn_idx3 = tf_util.knn(origin_adj_matrix, k=k_neighbors[2])
    nn_idx4 = tf_util.knn(origin_adj_matrix, k=k_neighbors[3])

    # The aligned point cloud now goes through the Laplace-smoothing stage
    flag1 = 1
    neighbors_edge_feature_list, local_feature_list = multiscale_view_filedGAP(
        point_cloud,
        nn_idx1=nn_idx1,
        nn_idx2=nn_idx2,
        nn_idx3=nn_idx3,
        nn_idx4=nn_idx4,
        out_dims2neighbors=out_dims2neighbors,
        k_neighbors=k_neighbors,
        multi_heads=n_heads,
        scope_flag=flag1,
        is_training=is_training,
        bn_decay=bn_decay)

    # As in the final step of a multi-head attention block, concatenate the per-head outputs (32*4 channels); each head ends with a ReLU, so all values are non-negative
    neighbors_edge_feature = tf.concat(neighbors_edge_feature_list, axis=-1)

    conv_net = tf_util.conv2d(neighbors_edge_feature,
                              128, [1, 1],
                              padding='VALID',
                              stride=[1, 1],
                              bn=True,
                              is_training=is_training,
                              scope='conv1',
                              bn_decay=bn_decay)
    net1 = conv_net  # 128

    conv_net = tf_util.conv2d(conv_net,
                              64, [1, 1],
                              padding='VALID',
                              stride=[1, 1],
                              bn=True,
                              is_training=is_training,
                              scope='conv2',
                              bn_decay=bn_decay)
    net2 = conv_net  # 64

    conv_net = tf_util.conv2d(conv_net,
                              64, [1, 1],
                              padding='VALID',
                              stride=[1, 1],
                              bn=True,
                              is_training=is_training,
                              scope='conv3',
                              bn_decay=bn_decay)
    net3 = conv_net  # 64

    conv_net = tf_util.conv2d(conv_net,
                              64, [1, 1],
                              padding='VALID',
                              stride=[1, 1],
                              bn=True,
                              is_training=is_training,
                              scope='conv4',
                              bn_decay=bn_decay)
    net4 = conv_net  # 64

    local_feature = tf.concat(local_feature_list, axis=-1)

    net_out_concat = tf.concat([net1, net2, net3, net4, local_feature],
                               axis=-1)
    net_out = tf_util.conv2d(net_out_concat,
                             1024, [1, 1],
                             padding='VALID',
                             stride=[1, 1],
                             bn=True,
                             is_training=is_training,
                             scope='agg',
                             bn_decay=bn_decay)

    net_out = tf.reduce_max(net_out, axis=1, keep_dims=True)

    # MLP on global point cloud vector
    net_out = tf.reshape(net_out, [batch_size, -1])
    net_out = tf_util.fully_connected(net_out,
                                      512,
                                      bn=True,
                                      is_training=is_training,
                                      scope='fc1',
                                      bn_decay=bn_decay)

    net_out = tf_util.dropout(net_out,
                              keep_prob=0.5,
                              is_training=is_training,
                              scope='dp1')

    net_out = tf_util.fully_connected(net_out,
                                      256,
                                      bn=True,
                                      is_training=is_training,
                                      scope='fc2',
                                      bn_decay=bn_decay)

    net_out = tf_util.dropout(net_out,
                              keep_prob=0.5,
                              is_training=is_training,
                              scope='dp2')

    net_out = tf_util.fully_connected(net_out,
                                      40,
                                      activation_fn=None,
                                      scope='fc3')

    return net_out, end_points
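
The classification variant above depends on the external multiscale_view_filedGAP helper, so it cannot run from this snippet alone; assuming that helper is available, graph construction might look like the following (batch size and point count are illustrative assumptions):

import tensorflow as tf

BATCH, NUM_POINT = 32, 1024

points = tf.placeholder(tf.float32, [BATCH, NUM_POINT, 3], name='points')
is_training = tf.placeholder(tf.bool, name='is_training')

# logits has shape [BATCH, 40]
logits, end_points = get_model(points, is_training, bn_decay=None)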
Example No. 30
def get_model(point_cloud, input_label, is_training, cat_num, part_num, \
    batch_size, num_point, weight_decay, bn_decay=None):

  batch_size = point_cloud.get_shape()[0].value
  num_point = point_cloud.get_shape()[1].value
  input_image = tf.expand_dims(point_cloud, -1)
  end_points = {}

  k = 25

  adj = tf_util.pairwise_distance(point_cloud)
  nn_idx = tf_util.knn(adj, k=k)
  edge_feature = tf_util.get_edge_feature(input_image, nn_idx=nn_idx, k=k)

  with tf.variable_scope('transform_net1') as sc:
    transform = input_transform_net(edge_feature, is_training, bn_decay, K=3, is_dist=True)
  point_cloud_transformed = tf.matmul(point_cloud, transform)
  input_image = tf.expand_dims(point_cloud_transformed, -1)
  adj = tf_util.pairwise_distance(point_cloud_transformed)
  nn_idx = tf_util.knn(adj, k=k)
  edge_feature = tf_util.get_edge_feature(input_image, nn_idx=nn_idx, k=k)


  out1 = tf_util.conv2d(edge_feature, 64, [1,1],
                       padding='VALID', stride=[1,1],
                       bn=True, is_training=is_training, weight_decay=weight_decay,
                       scope='adj_conv1', bn_decay=bn_decay, is_dist=True)
  
  out2 = tf_util.conv2d(out1, 64, [1,1],
                       padding='VALID', stride=[1,1],
                       bn=True, is_training=is_training, weight_decay=weight_decay,
                       scope='adj_conv2', bn_decay=bn_decay, is_dist=True)

  net_max_1 = tf.reduce_max(out2, axis=-2, keep_dims=True)
  net_mean_1 = tf.reduce_mean(out2, axis=-2, keep_dims=True)

  out3 = tf_util.conv2d(tf.concat([net_max_1, net_mean_1], axis=-1), 64, [1,1],
                       padding='VALID', stride=[1,1],
                       bn=True, is_training=is_training, weight_decay=weight_decay,
                       scope='adj_conv3', bn_decay=bn_decay, is_dist=True)

  adj = tf_util.pairwise_distance(tf.squeeze(out3, axis=-2))
  nn_idx = tf_util.knn(adj, k=k)
  edge_feature = tf_util.get_edge_feature(out3, nn_idx=nn_idx, k=k)

  out4 = tf_util.conv2d(edge_feature, 64, [1,1],
                       padding='VALID', stride=[1,1],
                       bn=True, is_training=is_training, weight_decay=weight_decay,
                       scope='adj_conv4', bn_decay=bn_decay, is_dist=True)
  
  net_max_2 = tf.reduce_max(out4, axis=-2, keep_dims=True)
  net_mean_2 = tf.reduce_mean(out4, axis=-2, keep_dims=True)

  out5 = tf_util.conv2d(tf.concat([net_max_2, net_mean_2], axis=-1), 64, [1,1],
                       padding='VALID', stride=[1,1],
                       bn=True, is_training=is_training, weight_decay=weight_decay,
                       scope='adj_conv5', bn_decay=bn_decay, is_dist=True)

  adj = tf_util.pairwise_distance(tf.squeeze(out5, axis=-2))
  nn_idx = tf_util.knn(adj, k=k)
  edge_feature = tf_util.get_edge_feature(out5, nn_idx=nn_idx, k=k)

  out6 = tf_util.conv2d(edge_feature, 64, [1,1],
                       padding='VALID', stride=[1,1],
                       bn=True, is_training=is_training, weight_decay=weight_decay,
                       scope='adj_conv6', bn_decay=bn_decay, is_dist=True)

  net_max_3 = tf.reduce_max(out6, axis=-2, keep_dims=True)
  net_mean_3 = tf.reduce_mean(out6, axis=-2, keep_dims=True)
    

  out7 = tf_util.conv2d(tf.concat([out3, out5, out6], axis=-1), 1024, [1, 1], 
                       padding='VALID', stride=[1,1],
                       bn=True, is_training=is_training,
                       scope='adj_conv13', bn_decay=bn_decay, is_dist=True)

  out_max = tf_util.max_pool2d(out7, [num_point, 1], padding='VALID', scope='maxpool')

  one_hot_label_expand = tf.reshape(input_label, [batch_size, 1, 1, cat_num])
  one_hot_label_expand = tf_util.conv2d(one_hot_label_expand, 64, [1, 1], 
                       padding='VALID', stride=[1,1],
                       bn=True, is_training=is_training,
                       scope='one_hot_label_expand', bn_decay=bn_decay, is_dist=True)
  out_max = tf.concat(axis=3, values=[out_max, one_hot_label_expand])
  expand = tf.tile(out_max, [1, num_point, 1, 1])

  concat = tf.concat(axis=3, values=[expand, 
                                     net_max_1,
                                     net_mean_1,
                                     out3,
                                     net_max_2,
                                     net_mean_2,
                                     out5,
                                     net_max_3,
                                     net_mean_3,
                                     out6,
                                     out7])

  net2 = tf_util.conv2d(concat, 256, [1,1], padding='VALID', stride=[1,1], bn_decay=bn_decay,
            bn=True, is_training=is_training, scope='seg/conv1', weight_decay=weight_decay, is_dist=True)

  net2 = tf_util.dropout(net2, keep_prob=0.6, is_training=is_training, scope='seg/dp1')

  net2 = tf_util.conv2d(net2, 256, [1,1], padding='VALID', stride=[1,1], bn_decay=bn_decay,
            bn=True, is_training=is_training, scope='seg/conv2', weight_decay=weight_decay, is_dist=True)

  net2 = tf_util.dropout(net2, keep_prob=0.6, is_training=is_training, scope='seg/dp2')

  net2 = tf_util.conv2d(net2, 128, [1,1], padding='VALID', stride=[1,1], bn_decay=bn_decay,
            bn=True, is_training=is_training, scope='seg/conv3', weight_decay=weight_decay, is_dist=True)

  net2 = tf_util.conv2d(net2, 128, [1,1], padding='VALID', stride=[1,1], bn_decay=bn_decay,
            bn=True, is_training=is_training, scope='seg/conv4', weight_decay=weight_decay, is_dist=True)

  net2 = tf_util.conv2d(net2, part_num, [1,1], padding='VALID', stride=[1,1], activation_fn=None, 
            bn=False, scope='seg/conv5', weight_decay=weight_decay, is_dist=True)

  net2 = tf.reshape(net2, [batch_size, num_point, part_num])

  return net2

def get_loss(seg_pred, seg):
  per_instance_seg_loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=seg_pred, labels=seg), axis=1)
  seg_loss = tf.reduce_mean(per_instance_seg_loss)
  per_instance_seg_pred_res = tf.argmax(seg_pred, 2)
  
  return seg_loss, per_instance_seg_loss, per_instance_seg_pred_res
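
A sketch of how get_model and get_loss above might be wired together for training (placeholder names, sizes, and the optimizer choice are assumptions, not part of the example):

import tensorflow as tf

BATCH, NUM_POINT, CAT_NUM, PART_NUM = 32, 2048, 16, 50

points = tf.placeholder(tf.float32, [BATCH, NUM_POINT, 3])
one_hot_labels = tf.placeholder(tf.float32, [BATCH, CAT_NUM])
seg_labels = tf.placeholder(tf.int32, [BATCH, NUM_POINT])
is_training = tf.placeholder(tf.bool)

seg_pred = get_model(points, one_hot_labels, is_training,
                     cat_num=CAT_NUM, part_num=PART_NUM,
                     batch_size=BATCH, num_point=NUM_POINT,
                     weight_decay=0.0, bn_decay=None)
seg_loss, per_instance_loss, per_instance_pred = get_loss(seg_pred, seg_labels)
train_op = tf.train.AdamOptimizer(1e-3).minimize(seg_loss)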