Example 1
def pointNet(point_cloud, output_dim, is_training, use_bn=False, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx128 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}

    with tf.variable_scope('transform_net1') as sc:
        transform = input_transform_net(point_cloud, is_training, use_bn, bn_decay, K=3)
    point_cloud_transformed = tf.matmul(point_cloud, transform)
    input_image = tf.expand_dims(point_cloud_transformed, -1)

    net = tf_util.conv2d(input_image, 64, [1,3],
                         padding='VALID', stride=[1,1],
                         bn=use_bn, is_training=is_training,
                         scope='conv1', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=use_bn, is_training=is_training,
                         scope='conv2', bn_decay=bn_decay)

    with tf.variable_scope('transform_net2') as sc:
        transform = feature_transform_net(net, is_training, use_bn, bn_decay, K=64)
    end_points['transform'] = transform
    net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), transform)
    net_transformed = tf.expand_dims(net_transformed, [2])

    net = tf_util.conv2d(net_transformed, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=use_bn, is_training=is_training,
                         scope='conv3', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=use_bn, is_training=is_training,
                         scope='conv4', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 1024, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=use_bn, is_training=is_training,
                         scope='conv5', bn_decay=bn_decay)

    # Symmetric function: max pooling
    net = tf_util.max_pool2d(net, [num_point,1],
                             padding='VALID', scope='maxpool')

    net = tf.reshape(net, [batch_size, -1])
    net = tf_util.fully_connected(net, 512, bn=use_bn, is_training=is_training,
                                  scope='fc1', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training,
                          scope='dp1')
    net = tf_util.fully_connected(net, 256, bn=use_bn, is_training=is_training,
                                  scope='fc2', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training,
                          scope='dp2')
    net = tf_util.fully_connected(net, output_dim, activation_fn=None, scope='fc3')
    
    net = tf.nn.l2_normalize(net, dim=1)

    return net, end_points
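
A minimal usage sketch for the embedding network above, assuming TensorFlow 1.x and that the file defining pointNet (together with tf_util and the transform nets) is importable; the module name, shapes, and output dimension below are illustrative assumptions, not part of the original snippet.

# Hypothetical usage sketch (TF 1.x); module name and shapes are assumptions.
import numpy as np
import tensorflow as tf
from pointnet_embedding import pointNet  # assumed module name

BATCH_SIZE, NUM_POINT, OUTPUT_DIM = 32, 1024, 128

# Static batch/point dimensions are required because the model reads .value shapes.
points_pl = tf.placeholder(tf.float32, shape=(BATCH_SIZE, NUM_POINT, 3))
is_training_pl = tf.placeholder(tf.bool, shape=())

embedding, end_points = pointNet(points_pl, OUTPUT_DIM,
                                 is_training=is_training_pl, use_bn=True)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    vec = sess.run(embedding, feed_dict={
        points_pl: np.random.rand(BATCH_SIZE, NUM_POINT, 3),
        is_training_pl: False})  # vec has shape (32, 128), rows L2-normalized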
    
Example 2
def get_model(point_cloud, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}

    with tf.variable_scope('transform_net1') as sc:
        transform = input_transform_net(point_cloud, is_training, bn_decay, K=6)
    point_cloud_transformed = tf.matmul(point_cloud, transform)
    input_image = tf.expand_dims(point_cloud_transformed, -1)
    net = tf_util.conv2d(input_image, 64, [1,6],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv1', bn_decay=bn_decay)

    net = tf_util.conv2d(net, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv2', bn_decay=bn_decay)
    print("net = ", net.shape)
    with tf.variable_scope('transform_net2') as sc:
        transform = feature_transform_net(net, is_training, bn_decay, K=64)
    end_points['transform'] = transform
    net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), transform)
    net_transformed = tf.expand_dims(net_transformed, [2])

    net = tf_util.conv2d(net_transformed, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv3', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv4', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 1024, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv5', bn_decay=bn_decay)

    # Symmetric function: max pooling
    max_net = tf_util.max_pool2d(net, [num_point,1],
                             padding='VALID', scope='maxpool')
    avg_net = tf_util.avg_pool2d(net, [num_point,1],
                             padding='VALID', scope='avgpool')
    max_avg_net = tf.concat([max_net, avg_net], 3)

    net = tf.reshape(max_avg_net, [batch_size, -1])
    net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training,
                                  scope='fc1', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training,
                          scope='dp1')
    net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training,
                                  scope='fc2', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training,
                          scope='dp2')
    net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')

    return net, end_points
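
Compared with the standard PointNet head, the distinctive step in this example is the symmetric function: max pooling and average pooling over the points are computed separately and concatenated before the fully connected layers. Below is a self-contained sketch of that aggregation with plain TF 1.x ops; the BxNx1x1024 input shape mirrors the conv5 output above and is an assumption here.

# Hedged sketch of the combined max+avg symmetric function (TF 1.x).
import tensorflow as tf

batch_size, num_point = 32, 1024
net = tf.placeholder(tf.float32, shape=(batch_size, num_point, 1, 1024))  # conv5-like features

max_net = tf.reduce_max(net, axis=1, keep_dims=True)        # Bx1x1x1024
avg_net = tf.reduce_mean(net, axis=1, keep_dims=True)       # Bx1x1x1024
max_avg_net = tf.concat([max_net, avg_net], axis=3)         # Bx1x1x2048
global_feature = tf.reshape(max_avg_net, [batch_size, -1])  # Bx2048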
Example 3
def forward(point_cloud, is_training, bn_decay=None):
    """PointNetVLAD,    INPUT is batch_num_queries X num_pointclouds_per_query X num_points_per_pointcloud X 3, 
                        OUTPUT batch_num_queries X num_pointclouds_per_query X output_dim """
    batch_num_queries = point_cloud.get_shape()[0].value
    num_pointclouds_per_query = point_cloud.get_shape()[1].value
    num_points = point_cloud.get_shape()[2].value
    CLUSTER_SIZE=64
    OUTPUT_DIM=256
    point_cloud = tf.reshape(point_cloud, [batch_num_queries*num_pointclouds_per_query, num_points,3])

    with tf.variable_scope('transform_net1') as sc:
        input_transform = input_transform_net(point_cloud, is_training, bn_decay, K=3)
    point_cloud_transformed = tf.matmul(point_cloud, input_transform)
    input_image = tf.expand_dims(point_cloud_transformed, -1)

    net = tf_util.conv2d(input_image, 64, [1,3],
                         padding='VALID', stride=[1,1],
                         is_training=is_training,
                         scope='conv1', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         is_training=is_training,
                         scope='conv2', bn_decay=bn_decay)

    with tf.variable_scope('transform_net2') as sc:
        feature_transform = feature_transform_net(net, is_training, bn_decay, K=64)
    net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), feature_transform)
    net_transformed = tf.expand_dims(net_transformed, [2])

    net = tf_util.conv2d(net_transformed, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         is_training=is_training,
                         scope='conv3', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         is_training=is_training,
                         scope='conv4', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 1024, [1,1],
                         padding='VALID', stride=[1,1],
                         is_training=is_training,
                         scope='conv5', bn_decay=bn_decay)

    NetVLAD = lp.NetVLAD(feature_size=1024, max_samples=num_points, cluster_size=CLUSTER_SIZE, 
                    output_dim=OUTPUT_DIM, gating=True, add_batch_norm=True,
                    is_training=is_training)

    net = tf.reshape(net, [-1, 1024])
    net = tf.nn.l2_normalize(net, 1)
    output = NetVLAD.forward(net)
    print(output)

    # normalize to have norm 1
    output = tf.nn.l2_normalize(output, 1)
    output = tf.reshape(output, [batch_num_queries, num_pointclouds_per_query, OUTPUT_DIM])

    return output
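
A usage sketch for the PointNetVLAD forward pass above, assuming TF 1.x and that the defining module (with its NetVLAD dependency imported as lp) is importable; the module name and the query/cloud/point counts are illustrative assumptions.

# Hypothetical usage sketch (TF 1.x); module name and shapes are assumptions.
import numpy as np
import tensorflow as tf
from pointnetvlad_model import forward  # assumed module name

BATCH_QUERIES, CLOUDS_PER_QUERY, NUM_POINTS = 2, 3, 4096

queries_pl = tf.placeholder(
    tf.float32, shape=(BATCH_QUERIES, CLOUDS_PER_QUERY, NUM_POINTS, 3))
is_training_pl = tf.placeholder(tf.bool, shape=())

descriptors = forward(queries_pl, is_training_pl)  # (2, 3, 256), unit-norm rows

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    out = sess.run(descriptors, feed_dict={
        queries_pl: np.random.rand(BATCH_QUERIES, CLOUDS_PER_QUERY, NUM_POINTS, 3),
        is_training_pl: False})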
Example 4
def get_model(point_cloud, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}

    with tf.variable_scope('transform_net1') as sc:
        transform = input_transform_net(point_cloud, is_training, bn_decay, K=3)
    point_cloud_transformed = tf.matmul(point_cloud, transform)
    input_image = tf.expand_dims(point_cloud_transformed, -1)

    net = tf_util.conv2d(input_image, 64, [1,3],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv1', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv2', bn_decay=bn_decay)

    with tf.variable_scope('transform_net2') as sc:
        transform = feature_transform_net(net, is_training, bn_decay, K=64)
    end_points['transform'] = transform
    net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), transform)
    net_transformed = tf.expand_dims(net_transformed, [2])

    net = tf_util.conv2d(net_transformed, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv3', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv4', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 1024, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv5', bn_decay=bn_decay)

    # Symmetric function: max pooling
    net = tf_util.max_pool2d(net, [num_point,1],
                             padding='VALID', scope='maxpool')

    net = tf.reshape(net, [batch_size, -1])
    net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training,
                                  scope='fc1', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training,
                          scope='dp1')
    net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training,
                                  scope='fc2', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training,
                          scope='dp2')
    net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')

    return net, end_points
Example 5
def get_model(point_cloud, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}

    with tf.variable_scope('transform_net1') as sc:
        transform = input_transform_net(point_cloud,
                                        is_training,
                                        bn_decay,
                                        K=3)

    return transform
def feature_extractor(point_cloud, feature_length, is_training, bn_decay=None):
    """ extract fix length feature from a point cloud, input is BxNx3, output B x feature_length """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}

    with tf.variable_scope('transform_net1') as sc:
        transform = input_transform_net(point_cloud, is_training, bn_decay, K=3)
    point_cloud_transformed = tf.matmul(point_cloud, transform)
    input_image = tf.expand_dims(point_cloud_transformed, -1)

    net = tf_util.conv2d(input_image, 64, [1,3],
                         padding='VALID', stride=[1,1],
                         bn=False, is_training=is_training,
                         scope='conv1', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=False, is_training=is_training,
                         scope='conv2', bn_decay=bn_decay)

    with tf.variable_scope('transform_net2') as sc:
        transform = feature_transform_net(net, is_training, bn_decay, K=64)
    end_points['transform'] = transform
    net_transformed = tf.matmul(tf.squeeze(net, [2]), transform)
    net_transformed = tf.expand_dims(net_transformed, [2])

    net = tf_util.conv2d(net_transformed, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=False, is_training=is_training,
                         scope='conv3', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 256, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=False, is_training=is_training,
                         scope='conv4', bn_decay=bn_decay)
    net = tf_util.conv2d(net, feature_length, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=False, is_training=is_training,
                         scope='conv5', bn_decay=bn_decay)

    # Symmetric function: max pooling
    net = tf_util.max_pool2d(net, [num_point,1],
                             padding='VALID', scope='maxpool')

    # output feature size = B x feature_length
    feature = tf.reshape(net, [batch_size, -1])

    return feature, end_points
Example 7
def trans_net(point_cloud,
              k=20,
              is_training=False,
              bn_decay=None,
              scope='transform_net1'):
    # input: (B, N, 3), returns: (B, N, 3)
    adj_matrix = tf_util.pairwise_distance(point_cloud)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(point_cloud, nn_idx=nn_idx, k=k)

    with tf.variable_scope(scope) as sc:
        transform = input_transform_net(edge_feature,
                                        is_training,
                                        bn_decay,
                                        K=3)

    point_cloud_transformed = tf.matmul(point_cloud, transform)

    return point_cloud_transformed
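
trans_net builds the k-NN graph that feeds the edge-feature T-Net; pairwise_distance, knn, and get_edge_feature live in the DGCNN-style tf_util module. Below is a hedged sketch of how such a pairwise-distance/k-NN pair is commonly implemented with plain TF ops (an assumption about those helpers, not their verbatim source).

# Hedged re-implementation sketch of pairwise_distance / knn (TF 1.x).
import tensorflow as tf

def pairwise_distance_sketch(point_cloud):
    """point_cloud: BxNx3 -> BxNxN matrix of squared Euclidean distances."""
    inner = -2 * tf.matmul(point_cloud, point_cloud, transpose_b=True)
    square = tf.reduce_sum(tf.square(point_cloud), axis=-1, keep_dims=True)  # BxNx1
    return square + inner + tf.transpose(square, perm=[0, 2, 1])

def knn_sketch(adj_matrix, k=20):
    """Indices of the k nearest neighbours per point (smallest distances first)."""
    _, nn_idx = tf.nn.top_k(-adj_matrix, k=k)
    return nn_idx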
Example 8
def get_model(point_cloud, is_training, bn_decay=None):
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    k = 20

    adj_matrix = tf_util.pairwise_distance(point_cloud)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    n_heads = 1
    attns = []
    local_features = []
    for i in range(n_heads):
        edge_feature, coefs, locals = attn_feature(point_cloud,
                                                   16,
                                                   nn_idx,
                                                   activation=tf.nn.elu,
                                                   in_dropout=0.6,
                                                   coef_dropout=0.6,
                                                   is_training=is_training,
                                                   bn_decay=bn_decay,
                                                   layer='layer0',
                                                   k=k,
                                                   i=i)
        attns.append(edge_feature)
        local_features.append(locals)
    neighbors_features = tf.concat(attns, axis=-1)
    neighbors_features = tf.concat(
        [tf.expand_dims(point_cloud, -2), neighbors_features], axis=-1)

    locals_max_transform = tf.reduce_max(tf.concat(local_features, axis=-1),
                                         axis=-2,
                                         keep_dims=True)

    with tf.variable_scope('transform_net1') as sc:
        transform = input_transform_net(neighbors_features,
                                        locals_max_transform,
                                        is_training,
                                        bn_decay,
                                        K=3)

    point_cloud_transformed = tf.matmul(point_cloud, transform)

    adj_matrix = tf_util.pairwise_distance(point_cloud_transformed)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    n_heads = 4
    attns = []
    local_features = []
    for i in range(n_heads):
        edge_feature, coefs, locals = attn_feature(point_cloud_transformed,
                                                   16,
                                                   nn_idx,
                                                   activation=tf.nn.elu,
                                                   in_dropout=0.6,
                                                   coef_dropout=0.6,
                                                   is_training=is_training,
                                                   bn_decay=bn_decay,
                                                   layer='layer1',
                                                   k=k,
                                                   i=i)
        attns.append(edge_feature)
        local_features.append(locals)

    neighbors_features = tf.concat(attns, axis=-1)
    neighbors_features = tf.concat(
        [tf.expand_dims(point_cloud_transformed, -2), neighbors_features],
        axis=-1)

    net = tf_util.conv2d(neighbors_features,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='gapnet1',
                         bn_decay=bn_decay)
    net1 = net

    locals_max = tf.reduce_max(tf.concat(local_features, axis=-1),
                               axis=-2,
                               keep_dims=True)

    net = tf_util.conv2d(net,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='gapnet2',
                         bn_decay=bn_decay)
    net2 = net

    net = tf_util.conv2d(net,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='gapnet3',
                         bn_decay=bn_decay)
    net3 = net

    net = tf_util.conv2d(net,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='gapnet4',
                         bn_decay=bn_decay)
    net4 = net

    net = tf_util.conv2d(tf.concat([net1, net2, net3, net4, locals_max],
                                   axis=-1),
                         1024, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='agg',
                         bn_decay=bn_decay)

    net = tf.reduce_max(net, axis=1, keep_dims=True)

    net = tf.reshape(net, [batch_size, -1])
    net = tf_util.fully_connected(net,
                                  512,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc1',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.5,
                          is_training=is_training,
                          scope='dp1')
    net = tf_util.fully_connected(net,
                                  256,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc2',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.5,
                          is_training=is_training,
                          scope='dp2')
    net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')

    return net, end_points
Example 9
def get_model_fine_tuing_evaluate(point_cloud,
                                  point_coords_in_voxels,
                                  num_scale,
                                  is_training,
                                  num_classes,
                                  bn_decay=None):
    """ Classification PointNet, input is BxNx3, output BxNx50 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}

    with tf.variable_scope('transform_net1') as sc:
        transform = input_transform_net(point_cloud,
                                        is_training,
                                        bn_decay,
                                        K=3)
    point_cloud_transformed = tf.matmul(point_cloud, transform)
    input_image = tf.expand_dims(point_cloud_transformed, -1)

    net = tf_util.conv2d(input_image,
                         64, [1, 3],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv1',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv2',
                         bn_decay=bn_decay)

    with tf.variable_scope('transform_net2') as sc:
        transform = feature_transform_net(net, is_training, bn_decay, K=64)
    end_points['transform'] = transform
    net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), transform)
    point_feat = tf.expand_dims(net_transformed, [2])
    print(point_feat)

    net = tf_util.conv2d(point_feat,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv3',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv4',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv5',
                         bn_decay=bn_decay)

    # PYRAMID START #
    # m x n x 128
    net = tf.squeeze(net, [2])
    print(net)

    # m x n x (4 x 128 = 512)
    points_feat1_concat = pyramid_nets.pyramid_convert_layer(
        net,
        point_coords_in_voxels,
        num_scale, [256],
        "Pyramid_1",
        bn=True,
        is_training=is_training,
        bn_decay=bn_decay)
    print(points_feat1_concat)

    # m x n x 1 x 512
    points_feat1_concat = tf.expand_dims(points_feat1_concat, [2])

    # Concat pyramid global and local features
    net = tf.expand_dims(net, [2])
    point_feat_concat = tf.concat(axis=3, values=[net, points_feat1_concat])
    # PYRAMID END #

    net = tf_util.conv2d(point_feat_concat,
                         256, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv6_pyramid',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         256, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv7_pyramid',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv8_pyramid',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv9_pyramid',
                         bn_decay=bn_decay)

    net = tf_util.conv2d(net,
                         num_classes, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         activation_fn=None,
                         scope='conv10_pyramid')
    net = tf.squeeze(net, [2])  # BxNxC

    return net, end_points
Example 10
def get_model_fine_tuning_test(point_cloud,
                               point_coords_in_voxels,
                               num_scale,
                               is_training,
                               bn_decay=None):
    """ ConvNet baseline, input is BxNx3 gray image """
    end_points = {}
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value

    with tf.variable_scope('transform_net1') as sc:
        transform = input_transform_net(point_cloud,
                                        is_training,
                                        bn_decay,
                                        K=3)
    point_cloud_transformed = tf.matmul(point_cloud, transform)
    input_image = tf.expand_dims(point_cloud_transformed, -1)

    net = tf_util.conv2d(input_image,
                         64, [1, 3],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv1',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv2',
                         bn_decay=bn_decay)

    with tf.variable_scope('transform_net2') as sc:
        transform = feature_transform_net(net, is_training, bn_decay, K=64)
    end_points['transform'] = transform
    net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), transform)
    net_transformed = tf.expand_dims(net_transformed, [2])

    net = tf_util.conv2d(net_transformed,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv3',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv4',
                         bn_decay=bn_decay)
    points_feat1 = tf_util.conv2d(net,
                                  1024, [1, 1],
                                  padding='VALID',
                                  stride=[1, 1],
                                  bn=True,
                                  is_training=is_training,
                                  scope='conv5',
                                  bn_decay=bn_decay)
    # PYRAMID START #
    # m x n x 1024
    points_feat1 = tf.squeeze(points_feat1, [2])
    print(points_feat1)

    # m x n x (4 x 128 = 512)
    points_feat1_concat = pyramid_nets.pyramid_convert_layer(
        points_feat1,
        point_coords_in_voxels,
        num_scale, [256],
        bn=True,
        is_training=is_training,
        bn_decay=bn_decay)
    print(points_feat1_concat)

    # m x n x 1 x 512
    points_feat1_concat = tf.expand_dims(points_feat1_concat, [2])

    # Concat pyramid global and local features
    points_feat1 = tf.expand_dims(points_feat1, [2])
    point_feat_concat = tf.concat(axis=3,
                                  values=[points_feat1, points_feat1_concat])
    # PYRAMID END #

    # Symmetric function: average pooling (the max-pooling variant is commented out below)
    #net = tf_util.max_pool2d(point_feat_concat, [num_point,1], padding='VALID', scope='pyramid_maxpool')
    net = tf_util.avg_pool2d(point_feat_concat, [num_point, 1],
                             padding='VALID',
                             scope='pyramid_maxpool')

    net = tf.reshape(net, [batch_size, -1])
    net = tf_util.fully_connected(net,
                                  512,
                                  bn=True,
                                  is_training=is_training,
                                  scope='pyramid_fc1',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.7,
                          is_training=is_training,
                          scope='pyramid_dp1')
    net = tf_util.fully_connected(net,
                                  256,
                                  bn=True,
                                  is_training=is_training,
                                  scope='pyramid_fc2',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.7,
                          is_training=is_training,
                          scope='pyramid_dp2')
    net = tf_util.fully_connected(net,
                                  40,
                                  activation_fn=None,
                                  scope='pyramid_fc3')

    return net, end_points
Example 11
def get_model(point_cloud, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}

    #T-NET(1),input_transform_net
    with tf.variable_scope('transform_net1') as sc:
        transform = input_transform_net(point_cloud, is_training, bn_decay, K=3)
        # (32,3,3)
    point_cloud_transformed = tf.matmul(point_cloud, transform)
    # (32,1024,3) * (32,3,3) -> (32,1024,3); box 2 in the paper's diagram

    input_image = tf.expand_dims(point_cloud_transformed, -1)
    #(32,1024,3,1)

    net = tf_util.conv2d(input_image, 64, [1,3],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv1', bn_decay=bn_decay)
    #(32,1024,1,64)
    net = tf_util.conv2d(net, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv2', bn_decay=bn_decay)
    # (32,1024,1,64); box 3 in the paper's diagram

    #T-NET(2),feature_transform_net
    with tf.variable_scope('transform_net2') as sc:
        transform = feature_transform_net(net, is_training, bn_decay, K=64)
        #(32,64,64)
    end_points['transform'] = transform

    net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), transform)
    # tf.squeeze() removes dimensions of size 1; here axis 2 has size 1
    # (32,1024,64) * (32,64,64) -> (32,1024,64); box 4 in the paper's diagram

    net_transformed = tf.expand_dims(net_transformed, [2])
    #(32,1024,1,64)

    net = tf_util.conv2d(net_transformed, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv3', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv4', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 1024, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv5', bn_decay=bn_decay)
    # (32,1024,1,1024); box 5 in the paper's diagram

    # Symmetric function: max pooling
    net = tf_util.max_pool2d(net, [num_point,1],
                             padding='VALID', scope='maxpool')
    #stride=[2, 2]?
    # (32,1024,1,1024) -> (32,1,1,1024); box 6 in the paper's diagram
    net = tf.reshape(net, [batch_size, -1])
    # (32,1024)
    net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training,
                                  scope='fc1', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training,
                          scope='dp1')
    net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training,
                                  scope='fc2', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training,
                          scope='dp2')
    net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')
    # (32,40); box 7 in the paper's diagram
    return net, end_points
Example 12
def get_model(point_cloud, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output BxNx50 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}

    with tf.variable_scope('transform_net1') as sc:
        transform = input_transform_net(point_cloud, is_training, bn_decay, K=3)
    point_cloud_transformed = tf.matmul(point_cloud, transform)
    input_image = tf.expand_dims(point_cloud_transformed, -1)

    net = tf_util.conv2d(input_image, 64, [1,3],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv1', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv2', bn_decay=bn_decay)

    with tf.variable_scope('transform_net2') as sc:
        transform = feature_transform_net(net, is_training, bn_decay, K=64)
    end_points['transform'] = transform
    net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), transform)
    point_feat = tf.expand_dims(net_transformed, [2])
    print(point_feat)

    net = tf_util.conv2d(point_feat, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv3', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv4', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 1024, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv5', bn_decay=bn_decay)
    global_feat = tf_util.max_pool2d(net, [num_point,1],
                                     padding='VALID', scope='maxpool')
    print(global_feat)

    global_feat_expand = tf.tile(global_feat, [1, num_point, 1, 1])
    concat_feat = tf.concat([point_feat, global_feat_expand], axis=3)
    print(concat_feat)

    net = tf_util.conv2d(concat_feat, 512, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv6', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 256, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv7', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv8', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv9', bn_decay=bn_decay)

    net = tf_util.conv2d(net, 50, [1,1],
                         padding='VALID', stride=[1,1], activation_fn=None,
                         scope='conv10')
    net = tf.squeeze(net, [2]) # BxNxC

    return net, end_points
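
The segmentation head here tiles the pooled 1024-d global feature back to every point and concatenates it with the 64-d per-point features before the per-point MLP. Below is a self-contained sketch of that tile-and-concat step with plain TF 1.x ops; the shapes are illustrative assumptions.

# Sketch of the global-feature tiling used for per-point segmentation (TF 1.x).
import tensorflow as tf

B, N = 32, 1024
point_feat = tf.placeholder(tf.float32, shape=(B, N, 1, 64))     # per-point features
global_feat = tf.placeholder(tf.float32, shape=(B, 1, 1, 1024))  # pooled global feature

global_feat_expand = tf.tile(global_feat, [1, N, 1, 1])            # BxNx1x1024
concat_feat = tf.concat([point_feat, global_feat_expand], axis=3)  # BxNx1x1088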
Example 13
def get_model(point_cloud, input_label, is_training, cat_num, part_num, \
              batch_size, num_point, weight_decay=.00004, bn_decay=None):
    bn_decay = bn_decay if bn_decay is not None else 0.9

    with tf.variable_scope("DGCNN"):
        batch_size = point_cloud.get_shape()[0].value
        num_point = point_cloud.get_shape()[1].value
        input_image = tf.expand_dims(point_cloud, -1)

        k = 20
        bn_params = {
            "is_training": is_training,
            "decay": bn_decay,
            'renorm': True
        }

        adj = tf_util.pairwise_distance(point_cloud)
        nn_idx = tf_util.knn(adj, k=k)
        edge_feature = tf_util.get_edge_feature(input_image,
                                                nn_idx=nn_idx,
                                                k=k)

        with tf.variable_scope('transform_net1') as sc:
            transform = input_transform_net(edge_feature,
                                            is_training,
                                            bn_decay,
                                            K=3)
        point_cloud_transformed = tf.matmul(point_cloud, transform)

        input_image = tf.expand_dims(point_cloud_transformed, -1)
        adj = tf_util.pairwise_distance(point_cloud_transformed)
        nn_idx = tf_util.knn(adj, k=k)
        edge_feature = tf_util.get_edge_feature(input_image,
                                                nn_idx=nn_idx,
                                                k=k)

        # out1 = tf_util.conv2d(edge_feature, 64, [1,1],
        #                      padding='VALID', stride=[1,1],
        #                      bn=True, is_training=is_training, weight_decay=weight_decay,
        #                      scope='adj_conv1', bn_decay=bn_decay, is_dist=True)
        out1 = layers.masked_conv2d(
            edge_feature,
            64,
            # max(int(round(64 * scale)), 32),
            [1, 1],
            padding='VALID',
            stride=1,
            normalizer_fn=slim.batch_norm,
            normalizer_params=bn_params,
            biases_initializer=tf.contrib.layers.xavier_initializer(),
            weights_regularizer=slim.l2_regularizer(weight_decay),
            activation_fn=tf.nn.relu6,
            weights_initializer=tf.contrib.layers.xavier_initializer(),
            scope='adj_conv1')

        # out2 = tf_util.conv2d(out1, 64, [1,1],
        #                      padding='VALID', stride=[1,1],
        #                      bn=True, is_training=is_training, weight_decay=weight_decay,
        #                      scope='adj_conv2', bn_decay=bn_decay, is_dist=True)
        out2 = layers.masked_conv2d(
            out1,
            64,
            # max(int(round(64 * scale)), 32),
            [1, 1],
            padding='VALID',
            stride=1,
            normalizer_fn=slim.batch_norm,
            normalizer_params=bn_params,
            biases_initializer=tf.contrib.layers.xavier_initializer(),
            weights_regularizer=slim.l2_regularizer(weight_decay),
            activation_fn=tf.nn.relu6,
            weights_initializer=tf.contrib.layers.xavier_initializer(),
            scope='adj_conv2')

        net_1 = tf.reduce_max(out2, axis=-2, keep_dims=True)

        adj = tf_util.pairwise_distance(net_1)
        nn_idx = tf_util.knn(adj, k=k)
        edge_feature = tf_util.get_edge_feature(net_1, nn_idx=nn_idx, k=k)

        # out3 = tf_util.conv2d(edge_feature, 64, [1,1],
        #                      padding='VALID', stride=[1,1],
        #                      bn=True, is_training=is_training, weight_decay=weight_decay,
        #                      scope='adj_conv3', bn_decay=bn_decay, is_dist=True)
        out3 = layers.masked_conv2d(
            edge_feature,
            64,
            # max(int(round(64 * scale)), 32),
            [1, 1],
            padding='VALID',
            stride=1,
            normalizer_fn=slim.batch_norm,
            normalizer_params=bn_params,
            biases_initializer=tf.contrib.layers.xavier_initializer(),
            weights_regularizer=slim.l2_regularizer(weight_decay),
            activation_fn=tf.nn.relu6,
            weights_initializer=tf.contrib.layers.xavier_initializer(),
            scope='adj_conv3')

        # out4 = tf_util.conv2d(out3, 64, [1,1],
        #                      padding='VALID', stride=[1,1],
        #                      bn=True, is_training=is_training, weight_decay=weight_decay,
        #                      scope='adj_conv4', bn_decay=bn_decay, is_dist=True)
        out4 = layers.masked_conv2d(
            out3,
            64,
            # max(int(round(64 * scale)), 32),
            [1, 1],
            padding='VALID',
            stride=1,
            normalizer_fn=slim.batch_norm,
            normalizer_params=bn_params,
            biases_initializer=tf.contrib.layers.xavier_initializer(),
            weights_regularizer=slim.l2_regularizer(weight_decay),
            activation_fn=tf.nn.relu6,
            weights_initializer=tf.contrib.layers.xavier_initializer(),
            scope='adj_conv4')

        net_2 = tf.reduce_max(out4, axis=-2, keep_dims=True)

        adj = tf_util.pairwise_distance(net_2)
        nn_idx = tf_util.knn(adj, k=k)
        edge_feature = tf_util.get_edge_feature(net_2, nn_idx=nn_idx, k=k)

        # out5 = tf_util.conv2d(edge_feature, 64, [1,1],
        #                      padding='VALID', stride=[1,1],
        #                      bn=True, is_training=is_training, weight_decay=weight_decay,
        #                      scope='adj_conv5', bn_decay=bn_decay, is_dist=True)
        out5 = layers.masked_conv2d(
            edge_feature,
            64,
            # max(int(round(64 * scale)), 32),
            [1, 1],
            padding='VALID',
            stride=1,
            normalizer_fn=slim.batch_norm,
            normalizer_params=bn_params,
            biases_initializer=tf.contrib.layers.xavier_initializer(),
            weights_regularizer=slim.l2_regularizer(weight_decay),
            activation_fn=tf.nn.relu6,
            weights_initializer=tf.contrib.layers.xavier_initializer(),
            scope='adj_conv5')

        # out6 = tf_util.conv2d(out5, 64, [1,1],
        #                      padding='VALID', stride=[1,1],
        #                      bn=True, is_training=is_training, weight_decay=weight_decay,
        #                      scope='adj_conv6', bn_decay=bn_decay, is_dist=True)

        net_3 = tf.reduce_max(out5, axis=-2, keep_dims=True)

        # out7 = tf_util.conv2d(tf.concat([net_1, net_2, net_3], axis=-1), 1024, [1, 1],
        #                      padding='VALID', stride=[1,1],
        #                      bn=True, is_training=is_training,
        #                      scope='adj_conv7', bn_decay=bn_decay, is_dist=True)
        out7 = layers.masked_conv2d(
            tf.concat([net_1, net_2, net_3], axis=-1),
            1024,
            # max(int(round(64 * scale)), 32),
            [1, 1],
            padding='VALID',
            stride=1,
            normalizer_fn=slim.batch_norm,
            normalizer_params=bn_params,
            biases_initializer=tf.contrib.layers.xavier_initializer(),
            weights_regularizer=slim.l2_regularizer(weight_decay),
            activation_fn=tf.nn.relu6,
            weights_initializer=tf.contrib.layers.xavier_initializer(),
            scope='adj_conv7')

        # out_max = tf_util.max_pool2d(out7, [num_point, 1], padding='VALID', scope='maxpool')
        out_max = slim.max_pool2d(out7, [num_point, 1],
                                  stride=1,
                                  padding='VALID',
                                  scope='maxpool')

        one_hot_label_expand = tf.reshape(input_label,
                                          [batch_size, 1, 1, cat_num])
        # one_hot_label_expand = tf_util.conv2d(one_hot_label_expand, 64, [1, 1],
        #                      padding='VALID', stride=[1,1],
        #                      bn=True, is_training=is_training,
        #                      scope='one_hot_label_expand', bn_decay=bn_decay, is_dist=True)
        one_hot_label_expand = layers.masked_conv2d(
            one_hot_label_expand,
            64,
            # max(int(round(64 * scale)), 32),
            [1, 1],
            padding='VALID',
            stride=1,
            normalizer_fn=slim.batch_norm,
            normalizer_params=bn_params,
            biases_initializer=tf.contrib.layers.xavier_initializer(),
            weights_regularizer=slim.l2_regularizer(weight_decay),
            activation_fn=tf.nn.relu6,
            weights_initializer=tf.contrib.layers.xavier_initializer(),
            scope='one_hot_label_expand')
        out_max = tf.concat(axis=3, values=[out_max, one_hot_label_expand])
        expand = tf.tile(out_max, [1, num_point, 1, 1])

        concat = tf.concat(axis=3, values=[expand, net_1, net_2, net_3])

        # net2 = tf_util.conv2d(concat, 256, [1,1], padding='VALID', stride=[1,1], bn_decay=bn_decay,
        #           bn=True, is_training=is_training, scope='seg/conv1', weight_decay=weight_decay, is_dist=True)
        net2 = layers.masked_conv2d(
            concat,
            256,
            # max(int(round(64 * scale)), 32),
            [1, 1],
            padding='VALID',
            stride=1,
            normalizer_fn=slim.batch_norm,
            normalizer_params=bn_params,
            biases_initializer=tf.contrib.layers.xavier_initializer(),
            weights_regularizer=slim.l2_regularizer(weight_decay),
            activation_fn=tf.nn.relu6,
            weights_initializer=tf.contrib.layers.xavier_initializer(),
            scope='seg/conv1')
        # net2 = tf_util.dropout(net2, keep_prob=0.6, is_training=is_training, scope='seg/dp1')
        net2 = slim.dropout(net2,
                            keep_prob=0.6,
                            is_training=is_training,
                            scope='seg/dp1')
        # net2 = tf_util.conv2d(net2, 256, [1,1], padding='VALID', stride=[1,1], bn_decay=bn_decay,
        #           bn=True, is_training=is_training, scope='seg/conv2', weight_decay=weight_decay, is_dist=True)
        net2 = layers.masked_conv2d(
            net2,
            256,
            # max(int(round(64 * scale)), 32),
            [1, 1],
            padding='VALID',
            stride=1,
            normalizer_fn=slim.batch_norm,
            normalizer_params=bn_params,
            biases_initializer=tf.contrib.layers.xavier_initializer(),
            weights_regularizer=slim.l2_regularizer(weight_decay),
            activation_fn=tf.nn.relu6,
            weights_initializer=tf.contrib.layers.xavier_initializer(),
            scope='seg/conv2')
        # net2 = tf_util.dropout(net2, keep_prob=0.6, is_training=is_training, scope='seg/dp2')
        net2 = slim.dropout(net2,
                            keep_prob=0.6,
                            is_training=is_training,
                            scope='seg/dp2')
        # net2 = tf_util.conv2d(net2, 128, [1,1], padding='VALID', stride=[1,1], bn_decay=bn_decay,
        #           bn=True, is_training=is_training, scope='seg/conv3', weight_decay=weight_decay, is_dist=True)
        net2 = layers.masked_conv2d(
            net2,
            128,
            # max(int(round(64 * scale)), 32),
            [1, 1],
            padding='VALID',
            stride=1,
            normalizer_fn=slim.batch_norm,
            normalizer_params=bn_params,
            biases_initializer=tf.contrib.layers.xavier_initializer(),
            weights_regularizer=slim.l2_regularizer(weight_decay),
            activation_fn=tf.nn.relu6,
            weights_initializer=tf.contrib.layers.xavier_initializer(),
            scope='seg/conv3')
        # net2 = tf_util.conv2d(net2, part_num, [1,1], padding='VALID', stride=[1,1], activation_fn=None,
        #           bn=False, scope='seg/conv4', weight_decay=weight_decay, is_dist=True)
        net2 = layers.masked_conv2d(
            net2,
            part_num,
            # max(int(round(64 * scale)), 32),
            [1, 1],
            padding='VALID',
            stride=1,
            normalizer_fn=None,
            # normalizer_params=bn_params,
            biases_initializer=tf.contrib.layers.xavier_initializer(),
            weights_regularizer=slim.l2_regularizer(weight_decay),
            activation_fn=None,
            weights_initializer=tf.contrib.layers.xavier_initializer(),
            scope='seg/conv4')

        net2 = tf.reshape(net2, [batch_size, num_point, part_num])

    return net2
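
Every masked_conv2d call above repeats the same normalizer, initializer, and regularizer arguments. Below is a hedged refactoring sketch that factors them into a small helper, assuming tf, layers, slim, bn_params, and weight_decay are in scope exactly as in the function above; it illustrates the pattern and is not code from the original repository.

# Hypothetical helper to avoid repeating the masked_conv2d keyword arguments.
def masked_conv_block(inputs, num_filters, scope, bn_params, weight_decay,
                      activation_fn=tf.nn.relu6):
    """1x1 masked conv with the shared BN/initializer/regularizer settings."""
    return layers.masked_conv2d(
        inputs, num_filters, [1, 1],
        padding='VALID', stride=1,
        normalizer_fn=slim.batch_norm,
        normalizer_params=bn_params,
        biases_initializer=tf.contrib.layers.xavier_initializer(),
        weights_regularizer=slim.l2_regularizer(weight_decay),
        weights_initializer=tf.contrib.layers.xavier_initializer(),
        activation_fn=activation_fn,
        scope=scope)

# e.g. out1 = masked_conv_block(edge_feature, 64, 'adj_conv1', bn_params, weight_decay)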
Example 14
def get_model(point_cloud, is_training, bn_decay=None, mask=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}

    with tf.variable_scope('transform_net1') as sc:
        transform = input_transform_net(point_cloud, is_training, bn_decay, K=3)
    point_cloud_transformed = tf.matmul(point_cloud, transform)
    input_image = tf.expand_dims(point_cloud_transformed, -1)

    net = tf_util.conv2d(input_image, 64, [1,3],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv1', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv2', bn_decay=bn_decay)

    with tf.variable_scope('transform_net2') as sc:
        transform = feature_transform_net(net, is_training, bn_decay, K=64)

    end_points['transform'] = transform
    net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), transform)
    net_transformed = tf.expand_dims(net_transformed, [2])

    net = tf_util.conv2d(net_transformed, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv3', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv4', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 1024, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv5', bn_decay=bn_decay)

    # Symmetric function: max pooling
    # net = tf_util.max_pool2d(net, [num_point,1],
    #                          padding='VALID', scope='maxpool')
    net = tf_util.max_pool2d_dropout(net, [num_point,1],
                             padding='VALID', scope='maxpool')


    net = tf.reshape(net, [batch_size, -1])
    net = tf.multiply(net, mask, name="mask_for_feature_evaluation")
    # net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training,
    net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training,
                                  scope='fc1', bn_decay=bn_decay)

    # leave out only single dropout.
    # net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training,
    #                       scope='dp1')
    net = tf_util.fully_connected(net, 128, bn=True, is_training=is_training,
                                  scope='fc2', bn_decay=bn_decay)

    # leave out only single dropout
    # net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training,
    #                       scope='dp2')
    net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')

    return net, end_points
Example 15
def get_model(point_cloud, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output BxNx50 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}

    with tf.variable_scope('transform_net1') as sc:
        transform = input_transform_net(point_cloud,
                                        is_training,
                                        bn_decay,
                                        K=3)
    point_cloud_transformed = tf.matmul(point_cloud, transform)
    input_image = tf.expand_dims(point_cloud_transformed, -1)

    net = tf_util.conv2d(input_image,
                         64, [1, 3],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv1',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv2',
                         bn_decay=bn_decay)

    with tf.variable_scope('transform_net2') as sc:
        transform = feature_transform_net(net, is_training, bn_decay, K=64)
    end_points['transform'] = transform
    net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), transform)
    point_feat = tf.expand_dims(net_transformed, [2])
    print(point_feat)

    net = tf_util.conv2d(point_feat,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv3',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv4',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         1024, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv5',
                         bn_decay=bn_decay)
    global_feat = tf_util.max_pool2d(net, [num_point, 1],
                                     padding='VALID',
                                     scope='maxpool')
    print(global_feat)

    global_feat_expand = tf.tile(global_feat, [1, num_point, 1, 1])
    concat_feat = tf.concat([point_feat, global_feat_expand], axis=3)
    print(concat_feat)

    net = tf_util.conv2d(concat_feat,
                         512, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv6',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         256, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv7',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv8',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv9',
                         bn_decay=bn_decay)

    net = tf_util.conv2d(net,
                         50, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         activation_fn=None,
                         scope='conv10')
    net = tf.squeeze(net, [2])  # BxNxC

    return net, end_points
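The function above produces per-point logits by fusing each point's local feature with the tiled global feature. A minimal wiring sketch, assuming TF 1.x, that the repo's tf_util and transform nets are importable, and that the surrounding function is named get_model(point_cloud, is_training, bn_decay=None) like the neighbouring examples (placeholder shapes are illustrative assumptions):

import tensorflow as tf

points_pl = tf.placeholder(tf.float32, shape=(32, 1024, 3))
labels_pl = tf.placeholder(tf.int32, shape=(32, 1024))   # per-point part labels
is_training_pl = tf.placeholder(tf.bool, shape=())

logits, end_points = get_model(points_pl, is_training_pl)  # logits: 32 x 1024 x 50
loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels_pl))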
Example n. 16
def get_model(point_cloud, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}

#    f = open("conf.txt","r")
#    configuration = f.read()
#    lines=configuration.split("\n")
#    net = point_cloud
    net = tf.expand_dims(point_cloud, -1)



    net = tf_util.conv2d(net, 512, [1, 1],
                         padding='VALID', stride=[1, 1],
                         bn=True, is_training=is_training,
                         scope='preStuff1', bn_decay=bn_decay)
#    net = tf_util.dropout(net, keep_prob=0.3, is_training=is_training, scope='pSdp1')

    net = tf_util.conv2d(net, 128, [1, 1],
                         padding='VALID', stride=[1, 1],
                         bn=True, is_training=is_training,
                         scope='preStuff2', bn_decay=bn_decay)
#    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='pSdp2')

#    net = tf_util.conv2d(net, 64, [1, 1],
#                         padding='VALID', stride=[1, 1],
#                         bn=True, is_training=is_training,
#                         scope='preStuff3', bn_decay=bn_decay)

    net = tf_util.max_pool2d(net, [num_point, 1],
                             padding='VALID', scope='preStuff_pool')

    net = tf_util.conv2d(net, 128, [1, 1],
                         padding='VALID', stride=[1, 1],
                         bn=True, is_training=is_training,
                         scope='preStuff3', bn_decay=bn_decay)

#    net = tf.reshape(net, [batch_size, -1])

    net = tf.squeeze(net)
    with tf.variable_scope('transform_net1') as sc:
        transform = input_transform_net(net, is_training, bn_decay, K=3)
    point_cloud_transformed = tf.matmul(point_cloud, transform)
    input_image = tf.expand_dims(point_cloud_transformed, -1)

    net = tf_util.conv2d(input_image, 128, [1,3],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv1', bn_decay=bn_decay)

#    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp1')

    net = tf_util.conv2d(net, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv2', bn_decay=bn_decay)
                # must be 64, otherwise change the second transform net
#    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp2')

    with tf.variable_scope('transform_net2') as sc:
        transform = feature_transform_net(net, is_training, bn_decay, K=64)
    end_points['transform'] = transform
    net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), transform)
    net_transformed = tf.expand_dims(net_transformed, [2])

#    net = tf_util.max_pool2d(net_transformed, [num_point,1], padding = 'VALID', scope = 'maxp')

    net = tf_util.conv2d(net_transformed, 512, [1, 1],
                         padding='VALID', stride=[1, 1],
                         bn=True, is_training=is_training,
                         scope='conv3', bn_decay=bn_decay)

#    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp3')

    net = tf_util.conv2d(net, 64, [1, 1],
                         padding='VALID', stride=[1, 1],
                         bn=True, is_training=is_training,
                         scope='conv4', bn_decay=bn_decay)

#    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp4')

    net = tf_util.conv2d(net, 512, [1, 1],
                         padding='VALID', stride=[1, 1],
                         bn=True, is_training=is_training,
                         scope='conv5', bn_decay=bn_decay)

#    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp5')

    net = tf_util.max_pool2d(net, [num_point, 1],
                             padding='VALID', scope='maxpool')

    net = tf_util.conv2d(net, 512, [1, 1],
                         padding='VALID', stride=[1, 1],
                         bn=True, is_training=is_training,
                         scope='conv6', bn_decay=bn_decay)
#    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp6')

    net = tf_util.conv2d(net, 512, [1, 1],
                         padding='VALID', stride=[1, 1],
                         bn=True, is_training=is_training,
                         scope='conv7', bn_decay=bn_decay)

    net = tf_util.conv2d(net, 512, [1, 1],
                         padding='VALID', stride=[1, 1],
                         bn=True, is_training=is_training,
                         scope='conv8', bn_decay=bn_decay)


    # Symmetric function: max pooling
#    net = tf_util.max_pool2d(net, [num_point, 1],
#                             padding = 'VALID', scope = 'maxpool')
    net = tf.reshape(net, [batch_size, -1])

    net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training,
                                  scope='fc1', bn_decay=bn_decay)

    net = tf_util.dropout(net, keep_prob=0.1, is_training=is_training, scope='dp7')

#    net = tf_util.dropout(net, keep_prob=0.3, is_training=is_training, scope='dp10')

    net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training,
                                  scope='fcbos', bn_decay=bn_decay)

    net = tf_util.dropout(net, keep_prob=0.1, is_training=is_training, scope='dp11')

#    net = tf_util.fully_connected(net, 128, bn=True, is_training=is_training,
#                                  scope='fcbos2', bn_decay=bn_decay)

#    net = tf_util.dropout(net, keep_prob=0.2, is_training=is_training, scope='dp12')

    net = tf_util.fully_connected(net, 4, activation_fn=None, scope='fc3')

#    changeLoggerNet(net)
    return net, end_points
Example n. 17
def get_model(point_cloud, input_label, is_training, cat_num, part_num, \
    batch_size, num_point, weight_decay, bn_decay=None):

    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    input_image = tf.expand_dims(point_cloud, -1)

    k = 20

    adj = tf_util.pairwise_distance(point_cloud)
    nn_idx = tf_util.knn(adj, k=k)
    edge_feature = tf_util.get_edge_feature(input_image, nn_idx=nn_idx, k=k)

    with tf.variable_scope('transform_net1') as sc:
        transform = input_transform_net(edge_feature,
                                        is_training,
                                        bn_decay,
                                        K=3,
                                        is_dist=True)
    point_cloud_transformed = tf.matmul(point_cloud, transform)

    input_image = tf.expand_dims(point_cloud_transformed, -1)
    adj = tf_util.pairwise_distance(point_cloud_transformed)
    nn_idx = tf_util.knn(adj, k=k)
    edge_feature = tf_util.get_edge_feature(input_image, nn_idx=nn_idx, k=k)

    out1 = tf_util.conv2d(edge_feature,
                          64, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn=True,
                          is_training=is_training,
                          weight_decay=weight_decay,
                          scope='adj_conv1',
                          bn_decay=bn_decay,
                          is_dist=True)

    out2 = tf_util.conv2d(out1,
                          64, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn=True,
                          is_training=is_training,
                          weight_decay=weight_decay,
                          scope='adj_conv2',
                          bn_decay=bn_decay,
                          is_dist=True)

    net_1 = tf.reduce_max(out2, axis=-2, keep_dims=True)

    adj = tf_util.pairwise_distance(net_1)
    nn_idx = tf_util.knn(adj, k=k)
    edge_feature = tf_util.get_edge_feature(net_1, nn_idx=nn_idx, k=k)

    out3 = tf_util.conv2d(edge_feature,
                          64, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn=True,
                          is_training=is_training,
                          weight_decay=weight_decay,
                          scope='adj_conv3',
                          bn_decay=bn_decay,
                          is_dist=True)

    out4 = tf_util.conv2d(out3,
                          64, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn=True,
                          is_training=is_training,
                          weight_decay=weight_decay,
                          scope='adj_conv4',
                          bn_decay=bn_decay,
                          is_dist=True)

    net_2 = tf.reduce_max(out4, axis=-2, keep_dims=True)

    adj = tf_util.pairwise_distance(net_2)
    nn_idx = tf_util.knn(adj, k=k)
    edge_feature = tf_util.get_edge_feature(net_2, nn_idx=nn_idx, k=k)

    out5 = tf_util.conv2d(edge_feature,
                          64, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn=True,
                          is_training=is_training,
                          weight_decay=weight_decay,
                          scope='adj_conv5',
                          bn_decay=bn_decay,
                          is_dist=True)

    # out6 = tf_util.conv2d(out5, 64, [1,1],
    #                      padding='VALID', stride=[1,1],
    #                      bn=True, is_training=is_training, weight_decay=weight_decay,
    #                      scope='adj_conv6', bn_decay=bn_decay, is_dist=True)

    net_3 = tf.reduce_max(out5, axis=-2, keep_dims=True)

    out7 = tf_util.conv2d(tf.concat([net_1, net_2, net_3], axis=-1),
                          1024, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn=True,
                          is_training=is_training,
                          scope='adj_conv7',
                          bn_decay=bn_decay,
                          is_dist=True)

    out_max = tf_util.max_pool2d(out7, [num_point, 1],
                                 padding='VALID',
                                 scope='maxpool')

    one_hot_label_expand = tf.reshape(input_label, [batch_size, 1, 1, cat_num])
    one_hot_label_expand = tf_util.conv2d(one_hot_label_expand,
                                          64, [1, 1],
                                          padding='VALID',
                                          stride=[1, 1],
                                          bn=True,
                                          is_training=is_training,
                                          scope='one_hot_label_expand',
                                          bn_decay=bn_decay,
                                          is_dist=True)
    out_max = tf.concat(axis=3, values=[out_max, one_hot_label_expand])
    expand = tf.tile(out_max, [1, num_point, 1, 1])

    concat = tf.concat(axis=3, values=[expand, net_1, net_2, net_3])

    net2 = tf_util.conv2d(concat,
                          256, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn_decay=bn_decay,
                          bn=True,
                          is_training=is_training,
                          scope='seg/conv1',
                          weight_decay=weight_decay,
                          is_dist=True)
    net2 = tf_util.dropout(net2,
                           keep_prob=0.6,
                           is_training=is_training,
                           scope='seg/dp1')
    net2 = tf_util.conv2d(net2,
                          256, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn_decay=bn_decay,
                          bn=True,
                          is_training=is_training,
                          scope='seg/conv2',
                          weight_decay=weight_decay,
                          is_dist=True)
    net2 = tf_util.dropout(net2,
                           keep_prob=0.6,
                           is_training=is_training,
                           scope='seg/dp2')
    net2 = tf_util.conv2d(net2,
                          128, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn_decay=bn_decay,
                          bn=True,
                          is_training=is_training,
                          scope='seg/conv3',
                          weight_decay=weight_decay,
                          is_dist=True)
    net2 = tf_util.conv2d(net2,
                          part_num, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          activation_fn=None,
                          bn=False,
                          scope='seg/conv4',
                          weight_decay=weight_decay,
                          is_dist=True)

    net2 = tf.reshape(net2, [batch_size, num_point, part_num])

    return net2
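Example n. 17 builds its dynamic graph with tf_util.pairwise_distance, tf_util.knn and tf_util.get_edge_feature. A rough, self-contained sketch of what those helpers compute, following the public DGCNN implementation (treat the exact details as an assumption about tf_util rather than a verified copy):

import tensorflow as tf

def pairwise_distance_sketch(points):
    # points: BxNxC -> BxNxN matrix of squared Euclidean distances
    inner = -2 * tf.matmul(points, points, transpose_b=True)
    sq = tf.reduce_sum(tf.square(points), axis=-1, keep_dims=True)  # BxNx1
    return sq + inner + tf.transpose(sq, perm=[0, 2, 1])

def knn_sketch(dist, k=20):
    # indices of the k nearest neighbours (smallest distances): BxNxk
    _, nn_idx = tf.nn.top_k(-dist, k=k)
    return nn_idx

def edge_feature_sketch(points, nn_idx, k=20):
    # edge features concat(x_i, x_j - x_i) for each neighbour j of point i: BxNxkx2C
    batch_size, num_points, num_dims = points.get_shape().as_list()
    idx_base = tf.reshape(tf.range(batch_size) * num_points, [-1, 1, 1])
    flat = tf.reshape(points, [-1, num_dims])                    # (B*N)xC
    neighbours = tf.gather(flat, nn_idx + idx_base)              # BxNxkxC
    central = tf.tile(tf.expand_dims(points, 2), [1, 1, k, 1])   # BxNxkxC
    return tf.concat([central, neighbours - central], axis=-1)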
Example n. 18
def Discriminator(point_cloud, is_training, bn_decay, reuse=False):
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    with tf.variable_scope("dis", reuse=reuse):
        with tf.variable_scope('dtransform_net1') as sc:
            transform = input_transform_net(point_cloud,
                                            is_training,
                                            bn_decay,
                                            K=3)
        point_cloud_transformed = tf.matmul(point_cloud, transform)
        input_image = tf.expand_dims(point_cloud_transformed, -1)

        net = tf_util.conv2d(input_image,
                             64, [1, 3],
                             padding='VALID',
                             stride=[1, 1],
                             bn=True,
                             is_training=is_training,
                             scope='dconv1',
                             bn_decay=bn_decay)
        net = tf_util.conv2d(net,
                             64, [1, 1],
                             padding='VALID',
                             stride=[1, 1],
                             bn=True,
                             is_training=is_training,
                             scope='dconv2',
                             bn_decay=bn_decay)

        with tf.variable_scope('dtransform_net2') as sc:
            transform = feature_transform_net(net, is_training, bn_decay, K=64)
        net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), transform)
        net_transformed = tf.expand_dims(net_transformed, [2])
        net = tf_util.conv2d(net_transformed,
                             64, [1, 1],
                             padding='VALID',
                             stride=[1, 1],
                             bn=True,
                             is_training=is_training,
                             scope='dconv3',
                             bn_decay=bn_decay)
        net = tf_util.conv2d(net,
                             128, [1, 1],
                             padding='VALID',
                             stride=[1, 1],
                             bn=True,
                             is_training=is_training,
                             scope='dconv4',
                             bn_decay=bn_decay)
        net = tf_util.conv2d(net,
                             1024, [1, 1],
                             padding='VALID',
                             stride=[1, 1],
                             bn=True,
                             is_training=is_training,
                             scope='dconv5',
                             bn_decay=bn_decay)
        # Symmetric function: max pooling
        vector = tf_util.max_pool2d(net, [num_point, 1],
                                    padding='VALID',
                                    scope='dmaxpool')
        vector = tf.reshape(vector, [batch_size, -1])
        net = tf.reshape(vector, [batch_size, -1])
        net = tf_util.fully_connected(net,
                                      512,
                                      bn=True,
                                      is_training=is_training,
                                      scope='fc1',
                                      bn_decay=bn_decay)
        net = tf_util.dropout(net,
                              keep_prob=0.7,
                              is_training=is_training,
                              scope='dp1')
        net = tf_util.fully_connected(net,
                                      256,
                                      bn=True,
                                      is_training=is_training,
                                      scope='fc2',
                                      bn_decay=bn_decay)
        net = tf_util.dropout(net,
                              keep_prob=0.7,
                              is_training=is_training,
                              scope='dp2')
        net = tf_util.fully_connected(net,
                                      128,
                                      bn=True,
                                      is_training=is_training,
                                      scope='fc3',
                                      bn_decay=bn_decay)
        net = tf_util.dropout(net,
                              keep_prob=0.7,
                              is_training=is_training,
                              scope='dp3')
        net = tf_util.fully_connected(net, 1, activation_fn=None, scope='fc4')
        D_value = tf.reduce_mean(net)
    return D_value
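Because the discriminator reduces its per-cloud score to a single scalar with tf.reduce_mean, it acts as a critic. Purely as an illustration (not part of the original code), one WGAN-style way such a critic might be wired up, assuming a separate generator produces fake_pc and the placeholder shapes shown here:

import tensorflow as tf

real_pc = tf.placeholder(tf.float32, shape=(32, 1024, 3))
fake_pc = tf.placeholder(tf.float32, shape=(32, 1024, 3))  # e.g. a generator's output
is_training_pl = tf.placeholder(tf.bool, shape=())

D_real = Discriminator(real_pc, is_training_pl, bn_decay=None)
D_fake = Discriminator(fake_pc, is_training_pl, bn_decay=None, reuse=True)

d_loss = D_fake - D_real   # critic pushes real scores up, fake scores down
g_loss = -D_fake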
Example n. 19
def get_model_point(point_cloud, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = point_cloud.get_shape()[0].value  #32
    num_point = point_cloud.get_shape()[1].value  #1024

    with tf.variable_scope('transform_net1') as sc:
        transform = input_transform_net(point_cloud,
                                        is_training,
                                        bn_decay,
                                        K=4)

    point_cloud_transformed = tf.matmul(point_cloud, transform)
    input_image = tf.expand_dims(point_cloud_transformed, -1)

    net = tf_util.conv2d(input_image,
                         64, [1, 4],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv1',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv2',
                         bn_decay=bn_decay)

    with tf.variable_scope('transform_net2') as sc:
        transform = feature_transform_net(net, is_training, bn_decay, K=64)

    net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), transform)
    net_transformed = tf.expand_dims(net_transformed, [2])

    net = tf_util.conv2d(net_transformed,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv3',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv4',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         1024, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv5',
                         bn_decay=bn_decay)

    # Symmetric function: max pooling
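    # argmax over the point axis records, for each feature channel, which input
    # point produced the maximum (the "critical point" indices returned by this function)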
    max_index = tf.squeeze(tf.argmax(net, 1))
    net = tf_util.max_pool2d(net, [num_point, 1],
                             padding='VALID',
                             scope='maxpool')

    net = tf.reshape(net, [batch_size, -1])
    net = tf_util.fully_connected(net,
                                  512,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc1',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.7,
                          is_training=is_training,
                          scope='dp1')
    net = tf_util.fully_connected(net,
                                  256,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc2',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.7,
                          is_training=is_training,
                          scope='dp2')

    return net, transform, max_index
Example n. 20
def get_model(point_cloud, cls_label, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}

    l0_xyz = tf.slice(point_cloud, [0, 0, 0], [-1, -1, 3])
    l0_points = tf.slice(point_cloud, [0, 0, 3], [-1, -1, 3])

    # Apply input-transform network
    with tf.variable_scope('transform_net1') as sc:
        transform = input_transform_net(point_cloud,
                                        is_training,
                                        bn_decay,
                                        K=3,
                                        normals=True)

    l0_xyz_transformed = tf.matmul(l0_xyz, transform)
    l0_points_transformed = tf.matmul(l0_points, transform)

    # Set abstraction layers
    l1_xyz, l1_points = pointnet_sa_module_msg(
        l0_xyz_transformed,
        l0_points_transformed,
        512, [0.1, 0.2, 0.4], [32, 64, 128],
        [[32, 32, 64], [64, 64, 128], [64, 96, 128]],
        is_training,
        bn_decay,
        scope='layer1')

    # Apply feature-transform network after first SA layer
    with tf.variable_scope('transform_net2') as sc:
        K = l1_points.get_shape()[2].value  # features concatenated from multiple scales
        l1_points_expanded = tf.expand_dims(l1_points, 2)
        transform = feature_transform_net(l1_points_expanded,
                                          is_training,
                                          bn_decay,
                                          K=K)

    l1_points_transformed = tf.matmul(l1_points, transform)
    end_points['transform'] = transform

    l2_xyz, l2_points = pointnet_sa_module_msg(
        l1_xyz,
        l1_points_transformed,
        128, [0.4, 0.8], [64, 128], [[128, 128, 256], [128, 196, 256]],
        is_training,
        bn_decay,
        scope='layer2')
    l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz,
                                                       l2_points,
                                                       npoint=None,
                                                       radius=None,
                                                       nsample=None,
                                                       mlp=[256, 512, 1024],
                                                       mlp2=None,
                                                       group_all=True,
                                                       is_training=is_training,
                                                       bn_decay=bn_decay,
                                                       scope='layer3')

    # Feature propagation layers
    l2_points = pointnet_fp_module(l2_xyz,
                                   l3_xyz,
                                   l2_points,
                                   l3_points, [256, 256],
                                   is_training,
                                   bn_decay,
                                   scope='fa_layer1')
    l1_points = pointnet_fp_module(l1_xyz,
                                   l2_xyz,
                                   l1_points,
                                   l2_points, [256, 128],
                                   is_training,
                                   bn_decay,
                                   scope='fa_layer2')

    cls_label_one_hot = tf.one_hot(cls_label,
                                   depth=NUM_CATEGORIES,
                                   on_value=1.0,
                                   off_value=0.0)
    cls_label_one_hot = tf.reshape(cls_label_one_hot,
                                   [batch_size, 1, NUM_CATEGORIES])
    cls_label_one_hot = tf.tile(cls_label_one_hot, [1, num_point, 1])
    l0_points = pointnet_fp_module(
        l0_xyz_transformed,
        l1_xyz,
        tf.concat(
            [cls_label_one_hot, l0_xyz_transformed, l0_points_transformed],
            axis=-1),
        l1_points, [128, 128],
        is_training,
        bn_decay,
        scope='fp_layer3')

    # FC layers
    net = tf_util.conv1d(l0_points,
                         128,
                         1,
                         padding='VALID',
                         bn=True,
                         is_training=is_training,
                         scope='fc1',
                         bn_decay=bn_decay)
    end_points['feats'] = net
    net = tf_util.dropout(net,
                          keep_prob=0.5,
                          is_training=is_training,
                          scope='dp1')
    net = tf_util.conv1d(net,
                         50,
                         1,
                         padding='VALID',
                         activation_fn=None,
                         scope='fc2')

    return net, end_points
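The cls_label_one_hot block broadcasts the object-category label to every point so the part-segmentation head is conditioned on the shape class. A shape-level sketch of that conditioning (NUM_CATEGORIES=16 and the placeholder shapes are assumptions matching the usual ShapeNet-part setup):

import tensorflow as tf

cls_label_pl = tf.placeholder(tf.int32, shape=(32,))
one_hot = tf.one_hot(cls_label_pl, depth=16)       # 32 x 16
one_hot = tf.reshape(one_hot, [32, 1, 16])
one_hot = tf.tile(one_hot, [1, 1024, 1])           # 32 x 1024 x 16, one copy per point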
Example n. 21
def get_model(point_cloud, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}

    l0_xyz = tf.slice(point_cloud, [0, 0, 0], [-1, -1, 3])
    l0_points = tf.slice(point_cloud, [0, 0, 3], [-1, -1, 3])
    end_points['l0_xyz'] = l0_xyz  # present in localspecgcn but not in dgcnn

    # dgcnn
    # batch_size = point_cloud.get_shape()[0].value
    # num_point = point_cloud.get_shape()[1].value
    # end_points = {}
    k = 20

    # Set spectral abstraction layers
    l1_xyz, l1_points, l1_indices = pointnet_sa_module(l0_xyz,
                                                       l0_points,
                                                       npoint=512,
                                                       radius=0.2,
                                                       nsample=32,
                                                       mlp=[64, 64, 128],
                                                       mlp2=None,
                                                       group_all=False,
                                                       is_training=is_training,
                                                       bn_decay=bn_decay,
                                                       scope='layer1')
    l2_xyz, l2_points, l2_indices = pointnet_sa_module_spec(
        l1_xyz,
        l1_points,
        npoint=128,
        radius=0.4,
        nsample=32,
        mlp=[128, 256],
        mlp2=[256],
        group_all=False,
        is_training=is_training,
        bn_decay=bn_decay,
        scope='layer2',
        knn=True,
        spec_conv_type='mlp',
        structure='spec',
        useloc_covmat=True,
        pooling='max')
    l3_xyz, l3_points, l3_indices = pointnet_sa_module_spec(
        l2_xyz,
        l2_points,
        npoint=32,
        radius=0.4,
        nsample=8,
        mlp=[256, 512],
        mlp2=[512],
        group_all=False,
        is_training=is_training,
        bn_decay=bn_decay,
        scope='layer3',
        knn=True,
        spec_conv_type='mlp',
        structure='spec',
        useloc_covmat=True,
        pooling='hier_cluster_pool',
        csize=2)
    l4_xyz, l4_points, l4_indices = pointnet_sa_module_spec(
        l3_xyz,
        l3_points,
        npoint=None,
        radius=None,
        nsample=None,
        mlp=[512, 1024],
        mlp2=None,
        group_all=True,
        is_training=is_training,
        bn_decay=bn_decay,
        scope='layer4',
        knn=True,
        spec_conv_type='mlp',
        structure='spec',
        useloc_covmat=True,
        pooling='max')
    bottom1 = tf.reshape(l4_points, [batch_size, -1])

    #Set spatial abstraction layers
    adj_matrix = tf_util_dg.pairwise_distance(point_cloud)
    nn_idx = tf_util_dg.knn(adj_matrix, k=k)
    edge_feature = tf_util_dg.get_edge_feature(point_cloud, nn_idx=nn_idx, k=k)

    with tf.variable_scope('transform_net1') as sc:
        transform = input_transform_net(edge_feature,
                                        is_training,
                                        bn_decay,
                                        K=6)
    point_cloud_transformed = tf.matmul(point_cloud, transform)

    adj_matrix = tf_util_dg.pairwise_distance(point_cloud_transformed)
    nn_idx = tf_util_dg.knn(adj_matrix, k=k)
    edge_feature = tf_util_dg.get_edge_feature(point_cloud_transformed,
                                               nn_idx=nn_idx,
                                               k=k)
    net = tf_util_dg.conv2d(edge_feature,
                            64, [1, 1],
                            padding='VALID',
                            stride=[1, 1],
                            bn=True,
                            is_training=is_training,
                            scope='dgcnn1',
                            bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net1 = net

    adj_matrix = tf_util_dg.pairwise_distance(net)
    nn_idx = tf_util_dg.knn(adj_matrix, k=k)
    edge_feature = tf_util_dg.get_edge_feature(net, nn_idx=nn_idx, k=k)
    net = tf_util_dg.conv2d(edge_feature,
                            64, [1, 1],
                            padding='VALID',
                            stride=[1, 1],
                            bn=True,
                            is_training=is_training,
                            scope='dgcnn2',
                            bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net2 = net

    adj_matrix = tf_util_dg.pairwise_distance(net)
    nn_idx = tf_util_dg.knn(adj_matrix, k=k)
    edge_feature = tf_util_dg.get_edge_feature(net, nn_idx=nn_idx, k=k)
    net = tf_util_dg.conv2d(edge_feature,
                            64, [1, 1],
                            padding='VALID',
                            stride=[1, 1],
                            bn=True,
                            is_training=is_training,
                            scope='dgcnn3',
                            bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net3 = net

    adj_matrix = tf_util_dg.pairwise_distance(net)
    nn_idx = tf_util_dg.knn(adj_matrix, k=k)
    edge_feature = tf_util_dg.get_edge_feature(net, nn_idx=nn_idx, k=k)
    net = tf_util_dg.conv2d(edge_feature,
                            128, [1, 1],
                            padding='VALID',
                            stride=[1, 1],
                            bn=True,
                            is_training=is_training,
                            scope='dgcnn4',
                            bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net4 = net

    net = tf_util_dg.conv2d(tf.concat([net1, net2, net3, net4], axis=-1),
                            1024, [1, 1],
                            padding='VALID',
                            stride=[1, 1],
                            bn=True,
                            is_training=is_training,
                            scope='agg',
                            bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=1, keep_dims=True)

    bottom2 = tf.reshape(net, [batch_size, -1])
    result = tf.add(bottom1, bottom2)

    # Fully connected layers
    net = tf.reshape(result, [batch_size, -1])
    net = tf_util_dg.fully_connected(net,
                                     512,
                                     bn=True,
                                     is_training=is_training,
                                     scope='fc1',
                                     bn_decay=bn_decay)
    net = tf_util_dg.dropout(net,
                             keep_prob=0.5,
                             is_training=is_training,
                             scope='dp1')
    net = tf_util_dg.fully_connected(net,
                                     256,
                                     bn=True,
                                     is_training=is_training,
                                     scope='fc2',
                                     bn_decay=bn_decay)
    net = tf_util_dg.dropout(net,
                             keep_prob=0.5,
                             is_training=is_training,
                             scope='dp2')
    net = tf_util_dg.fully_connected(net, 40, activation_fn=None, scope='fc3')
    return net, end_points
Example n. 22
def pointnet(num_classes,
             batch_size,
             num_points,
             bn_mom=0.9,
             workspace=512,
             scope="pointnet_"):
    point_cloud = mx.sym.Variable(name='data')  # (B,P,3)

    # Point cloud transformer
    transform = input_transform_net(point_cloud,
                                    batch_size,
                                    num_points,
                                    workspace,
                                    bn_mom,
                                    scope=scope + "itn_")  # (B, 3, 3)
    point_cloud_transformed = mx.sym.batch_dot(point_cloud,
                                               transform,
                                               name=scope + "input_transform")
    input_image = mx.sym.expand_dims(point_cloud_transformed,
                                     axis=1)  # (B, 1, P, 3)

    # Shared mlp
    conv0 = mx.sym.Convolution(data=input_image,
                               num_filter=64,
                               kernel=(1, 3),
                               stride=(1, 1),
                               name=scope + "conv0",
                               workspace=workspace)
    conv0 = mx.sym.BatchNorm(data=conv0,
                             fix_gamma=False,
                             eps=eps,
                             momentum=bn_mom,
                             name=scope + 'bn0')
    conv0 = mx.sym.Activation(data=conv0,
                              act_type='relu',
                              name=scope + 'relu0')

    conv1 = mx.sym.Convolution(data=conv0,
                               num_filter=64,
                               kernel=(1, 1),
                               stride=(1, 1),
                               name=scope + "conv1",
                               workspace=workspace)
    conv1 = mx.sym.BatchNorm(data=conv1,
                             fix_gamma=False,
                             eps=eps,
                             momentum=bn_mom,
                             name=scope + 'bn1')  # (B, 64, 1024, 1)
    conv1 = mx.sym.Activation(data=conv1,
                              act_type='relu',
                              name=scope + 'relu1')

    # Feature transformer
    transform = feature_transform_net(conv1,
                                      batch_size,
                                      num_points,
                                      workspace,
                                      bn_mom,
                                      scope=scope + "ftn_")  # (B, 64, 64)
    conv1_reshaped = mx.sym.Reshape(conv1, (-1, 64, num_points),
                                    name=scope + "conv1_reshape")  # (B, 64, 1024)
    conv1_reshaped = mx.sym.transpose(conv1_reshaped,
                                      axes=(0, 2, 1),
                                      name=scope + "conv1_reshape_transpose")
    conv1_transformed = mx.sym.batch_dot(conv1_reshaped,
                                         transform,
                                         name=scope + "conv1_transform")
    conv1_transformed = mx.sym.swapaxes(conv1_transformed,
                                        1,
                                        2,
                                        name=scope + "conv1_swapaxes")
    conv1_transformed = mx.sym.expand_dims(conv1_transformed,
                                           axis=3,
                                           name=scope + "conv1_expanddim")

    conv2 = mx.sym.Convolution(data=conv1_transformed,
                               num_filter=64,
                               kernel=(1, 1),
                               stride=(1, 1),
                               name=scope + "conv2",
                               workspace=workspace)
    conv2 = mx.sym.BatchNorm(data=conv2,
                             fix_gamma=False,
                             eps=eps,
                             momentum=bn_mom,
                             name=scope + 'bn2')
    conv2 = mx.sym.Activation(data=conv2,
                              act_type='relu',
                              name=scope + 'relu2')

    conv3 = mx.sym.Convolution(data=conv2,
                               num_filter=128,
                               kernel=(1, 1),
                               stride=(1, 1),
                               name=scope + "conv3",
                               workspace=workspace)
    conv3 = mx.sym.BatchNorm(data=conv3,
                             fix_gamma=False,
                             eps=eps,
                             momentum=bn_mom,
                             name=scope + 'bn3')
    conv3 = mx.sym.Activation(data=conv3,
                              act_type='relu',
                              name=scope + 'relu3')

    conv4 = mx.sym.Convolution(data=conv3,
                               num_filter=1024,
                               kernel=(1, 1),
                               stride=(1, 1),
                               name=scope + "conv4",
                               workspace=workspace)
    conv4 = mx.sym.BatchNorm(data=conv4,
                             fix_gamma=False,
                             eps=eps,
                             momentum=bn_mom,
                             name=scope + 'bn4')
    conv4 = mx.sym.Activation(data=conv4,
                              act_type='relu',
                              name=scope + 'relu4')

    pool5 = mx.sym.Pooling(data=conv4,
                           kernel=(num_points, 1),
                           pool_type='max',
                           name=scope + 'pool5')
    pool5_reshaped = mx.sym.Reshape(data=pool5,
                                    shape=(batch_size, -1),
                                    name=scope + 'pool5_reshape')

    fc6 = mx.sym.FullyConnected(data=pool5_reshaped,
                                num_hidden=512,
                                name=scope + 'fc6')
    fc6 = mx.sym.BatchNorm(data=fc6,
                           fix_gamma=False,
                           eps=eps,
                           momentum=bn_mom,
                           name=scope + 'bn6')
    fc6 = mx.sym.Activation(data=fc6, act_type='relu', name=scope + 'relu6')
    fc6 = mx.sym.Dropout(fc6, p=0.7)

    fc7 = mx.sym.FullyConnected(data=fc6, num_hidden=256, name=scope + 'fc7')
    fc7 = mx.sym.BatchNorm(data=fc7,
                           fix_gamma=False,
                           eps=eps,
                           momentum=bn_mom,
                           name=scope + 'bn7')
    fc7 = mx.sym.Activation(data=fc7, act_type='relu', name=scope + 'relu7')
    fc7 = mx.sym.Dropout(fc7, p=0.7)

    fc8 = mx.sym.FullyConnected(data=fc7, num_hidden=40, name=scope + 'fc8')
    cls = mx.sym.SoftmaxOutput(data=fc8, name='softmax')

    transform_transposed = mx.sym.transpose(transform,
                                            axes=(0, 2, 1),
                                            name=scope + "transpose_transform")
    mat_diff = mx.sym.batch_dot(transform,
                                transform_transposed,
                                name=scope + "transform_dot")
    const_arr = np.eye(64, dtype=np.float32).tolist()
    a = mx.sym.Variable('addition_loss_constant',
                        shape=(batch_size, 64, 64),
                        init=MyConstant(value=[const_arr] * batch_size))
    a = mx.sym.BlockGrad(a)  # now variable a is a constant
    mat_diff = mx.sym.elemwise_sub(mat_diff, a, name=scope + "sub_eye")
    mat_diff_loss = mx.sym.sum(mx.sym.square(mat_diff))
    matloss = mx.sym.make_loss(name='transform_mat_loss',
                               data=mat_diff_loss,
                               grad_scale=0.001 / (batch_size * 2.0))

    return mx.sym.Group([cls, matloss])
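The matloss block above is the usual PointNet feature-transform regulariser: it penalises ||A·Aᵀ − I||² so the learned 64x64 transform stays close to orthogonal. For reference, a hedged TensorFlow sketch of the same term, mirroring the standard PointNet get_loss (the function name and weight are illustrative):

import numpy as np
import tensorflow as tf

def transform_regularizer(transform, reg_weight=0.001):
    # transform: BxKxK feature-transform matrices (e.g. end_points['transform'])
    K = transform.get_shape()[2].value
    mat_diff = tf.matmul(transform, tf.transpose(transform, perm=[0, 2, 1]))
    mat_diff -= tf.constant(np.eye(K), dtype=tf.float32)
    return reg_weight * tf.nn.l2_loss(mat_diff)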
Example n. 23
def Encoder(point_cloud, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    with tf.variable_scope('transform_net1') as sc:
        transform = input_transform_net(point_cloud,
                                        is_training,
                                        bn_decay,
                                        K=3)
    point_cloud_transformed = tf.matmul(point_cloud, transform)
    input_image = tf.expand_dims(point_cloud_transformed, -1)

    net = tf_util.conv2d(input_image,
                         64, [1, 3],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv1',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv2',
                         bn_decay=bn_decay)

    with tf.variable_scope('transform_net2') as sc:
        transform = feature_transform_net(net, is_training, bn_decay, K=64)
    end_points['transform'] = transform
    net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), transform)
    net_transformed = tf.expand_dims(net_transformed, [2])
    net = tf_util.conv2d(net_transformed,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv3',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv4',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         1024, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv5',
                         bn_decay=bn_decay)
    # Symmetric function: max pooling
    vector = tf_util.max_pool2d(net, [num_point, 1],
                                padding='VALID',
                                scope='maxpool')
    vector = tf.reshape(vector, [batch_size, -1])
    # vector: (batch_size, 1024) global feature
    return vector
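The Encoder above collapses each cloud into a single 1024-D global feature vector. A minimal usage sketch, assuming TF 1.x and that tf_util and the transform nets are on the path (shapes illustrative):

import tensorflow as tf

points_pl = tf.placeholder(tf.float32, shape=(32, 1024, 3))
is_training_pl = tf.placeholder(tf.bool, shape=())
code = Encoder(points_pl, is_training_pl)   # Tensor of shape (32, 1024)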
Example n. 24
def get_model(point_cloud, input_label, is_training, cat_num, part_num, \
    batch_size, num_point, weight_decay, bn_decay=None):

  batch_size = point_cloud.get_shape()[0].value
  num_point = point_cloud.get_shape()[1].value
  input_image = tf.expand_dims(point_cloud, -1)
  end_points = {}

  k = 25

  adj = tf_util.pairwise_distance(point_cloud)
  nn_idx = tf_util.knn(adj, k=k)
  edge_feature = tf_util.get_edge_feature(input_image, nn_idx=nn_idx, k=k)

  with tf.variable_scope('transform_net1') as sc:
    transform = input_transform_net(edge_feature, is_training, bn_decay, K=3, is_dist=True)
  point_cloud_transformed = tf.matmul(point_cloud, transform)
  input_image = tf.expand_dims(point_cloud_transformed, -1)
  adj = tf_util.pairwise_distance(point_cloud_transformed)
  nn_idx = tf_util.knn(adj, k=k)
  edge_feature = tf_util.get_edge_feature(input_image, nn_idx=nn_idx, k=k)


  out1 = tf_util.conv2d(edge_feature, 64, [1,1],
                       padding='VALID', stride=[1,1],
                       bn=True, is_training=is_training, weight_decay=weight_decay,
                       scope='adj_conv1', bn_decay=bn_decay, is_dist=True)
  
  out2 = tf_util.conv2d(out1, 64, [1,1],
                       padding='VALID', stride=[1,1],
                       bn=True, is_training=is_training, weight_decay=weight_decay,
                       scope='adj_conv2', bn_decay=bn_decay, is_dist=True)

  net_max_1 = tf.reduce_max(out2, axis=-2, keep_dims=True)
  net_mean_1 = tf.reduce_mean(out2, axis=-2, keep_dims=True)

  out3 = tf_util.conv2d(tf.concat([net_max_1, net_mean_1], axis=-1), 64, [1,1],
                       padding='VALID', stride=[1,1],
                       bn=True, is_training=is_training, weight_decay=weight_decay,
                       scope='adj_conv3', bn_decay=bn_decay, is_dist=True)

  adj = tf_util.pairwise_distance(tf.squeeze(out3, axis=-2))
  nn_idx = tf_util.knn(adj, k=k)
  edge_feature = tf_util.get_edge_feature(out3, nn_idx=nn_idx, k=k)

  out4 = tf_util.conv2d(edge_feature, 64, [1,1],
                       padding='VALID', stride=[1,1],
                       bn=True, is_training=is_training, weight_decay=weight_decay,
                       scope='adj_conv4', bn_decay=bn_decay, is_dist=True)
  
  net_max_2 = tf.reduce_max(out4, axis=-2, keep_dims=True)
  net_mean_2 = tf.reduce_mean(out4, axis=-2, keep_dims=True)

  out5 = tf_util.conv2d(tf.concat([net_max_2, net_mean_2], axis=-1), 64, [1,1],
                       padding='VALID', stride=[1,1],
                       bn=True, is_training=is_training, weight_decay=weight_decay,
                       scope='adj_conv5', bn_decay=bn_decay, is_dist=True)

  adj = tf_util.pairwise_distance(tf.squeeze(out5, axis=-2))
  nn_idx = tf_util.knn(adj, k=k)
  edge_feature = tf_util.get_edge_feature(out5, nn_idx=nn_idx, k=k)

  out6 = tf_util.conv2d(edge_feature, 64, [1,1],
                       padding='VALID', stride=[1,1],
                       bn=True, is_training=is_training, weight_decay=weight_decay,
                       scope='adj_conv6', bn_decay=bn_decay, is_dist=True)

  net_max_3 = tf.reduce_max(out6, axis=-2, keep_dims=True)
  net_mean_3 = tf.reduce_mean(out6, axis=-2, keep_dims=True)
    

  out7 = tf_util.conv2d(tf.concat([out3, out5, out6], axis=-1), 1024, [1, 1], 
                       padding='VALID', stride=[1,1],
                       bn=True, is_training=is_training,
                       scope='adj_conv13', bn_decay=bn_decay, is_dist=True)

  out_max = tf_util.max_pool2d(out7, [num_point, 1], padding='VALID', scope='maxpool')

  one_hot_label_expand = tf.reshape(input_label, [batch_size, 1, 1, cat_num])
  one_hot_label_expand = tf_util.conv2d(one_hot_label_expand, 64, [1, 1], 
                       padding='VALID', stride=[1,1],
                       bn=True, is_training=is_training,
                       scope='one_hot_label_expand', bn_decay=bn_decay, is_dist=True)
  out_max = tf.concat(axis=3, values=[out_max, one_hot_label_expand])
  expand = tf.tile(out_max, [1, num_point, 1, 1])

  concat = tf.concat(axis=3, values=[expand, 
                                     net_max_1,
                                     net_mean_1,
                                     out3,
                                     net_max_2,
                                     net_mean_2,
                                     out5,
                                     net_max_3,
                                     net_mean_3,
                                     out6,
                                     out7])

  net2 = tf_util.conv2d(concat, 256, [1,1], padding='VALID', stride=[1,1], bn_decay=bn_decay,
            bn=True, is_training=is_training, scope='seg/conv1', weight_decay=weight_decay, is_dist=True)

  net2 = tf_util.dropout(net2, keep_prob=0.6, is_training=is_training, scope='seg/dp1')

  net2 = tf_util.conv2d(net2, 256, [1,1], padding='VALID', stride=[1,1], bn_decay=bn_decay,
            bn=True, is_training=is_training, scope='seg/conv2', weight_decay=weight_decay, is_dist=True)

  net2 = tf_util.dropout(net2, keep_prob=0.6, is_training=is_training, scope='seg/dp2')

  net2 = tf_util.conv2d(net2, 128, [1,1], padding='VALID', stride=[1,1], bn_decay=bn_decay,
            bn=True, is_training=is_training, scope='seg/conv3', weight_decay=weight_decay, is_dist=True)

  net2 = tf_util.conv2d(net2, 128, [1,1], padding='VALID', stride=[1,1], bn_decay=bn_decay,
            bn=True, is_training=is_training, scope='seg/conv4', weight_decay=weight_decay, is_dist=True)

  net2 = tf_util.conv2d(net2, part_num, [1,1], padding='VALID', stride=[1,1], activation_fn=None, 
            bn=False, scope='seg/conv5', weight_decay=weight_decay, is_dist=True)

  net2 = tf.reshape(net2, [batch_size, num_point, part_num])

  return net2

def get_loss(seg_pred, seg):
  per_instance_seg_loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=seg_pred, labels=seg), axis=1)
  seg_loss = tf.reduce_mean(per_instance_seg_loss)
  per_instance_seg_pred_res = tf.argmax(seg_pred, 2)
  
  return seg_loss, per_instance_seg_loss, per_instance_seg_pred_res
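get_loss expects per-point logits of shape B x N x part_num and integer part labels of shape B x N. A small wiring sketch (the batch size, point count and part_num=4 are illustrative assumptions):

import tensorflow as tf

seg_pred_pl = tf.placeholder(tf.float32, shape=(32, 1024, 4))  # logits from get_model
seg_gt_pl = tf.placeholder(tf.int32, shape=(32, 1024))         # ground-truth part ids
seg_loss, per_instance_loss, per_instance_pred = get_loss(seg_pred_pl, seg_gt_pl)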
Example n. 25
def get_model(point_cloud, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}

    # scopes allow variable sharing on reuse and avoid name clashes
    with tf.variable_scope('transform_net1') as sc:
        transform = input_transform_net(point_cloud,
                                        is_training,
                                        bn_decay,
                                        K=3)
    point_cloud_transformed = tf.matmul(point_cloud, transform)  # BxNx3
    input_image = tf.expand_dims(point_cloud_transformed, -1)  # BxNx3x1

    # Shared MLP (64, 64) implemented with conv2d
    net = tf_util.conv2d(input_image,
                         64,
                         [1, 3],
                         # kernel [1, 3] spans the 3 coordinates of BxNx3x1; output BxNx1x64
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv1',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         64,
                         [1, 1],
                         padding='VALID',
                         stride=[1, 1],  # output BxNx1x64
                         bn=True,
                         is_training=is_training,
                         scope='conv2',
                         bn_decay=bn_decay)

    # a scope distinct from transform_net1
    with tf.variable_scope('transform_net2') as sc:
        transform = feature_transform_net(net, is_training, bn_decay, K=64)
    end_points['transform'] = transform
    # net has shape BxNx1x64; squeeze out the size-1 axis before the matmul
    net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), transform)
    net_transformed = tf.expand_dims(net_transformed, [2])

    net = tf_util.conv2d(net_transformed,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv3',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv4',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         1024, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv5',
                         bn_decay=bn_decay)

    # Symmetric function: max pooling
    net = tf_util.max_pool2d(net, [num_point, 1],
                             padding='VALID',
                             scope='maxpool')

    net = tf.reshape(net, [batch_size, -1])
    net = tf_util.fully_connected(net,
                                  512,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc1',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.7,
                          is_training=is_training,
                          scope='dp1')
    net = tf_util.fully_connected(net,
                                  256,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc2',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.7,
                          is_training=is_training,
                          scope='dp2')
    net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')

    return net, end_points
Example n. 26
def get_gen_model(point_cloud,
                  is_training,
                  scope,
                  bradius=1.0,
                  reuse=None,
                  use_bn=False,
                  use_ibn=False,
                  use_normal=False,
                  bn_decay=None,
                  up_ratio=4,
                  num_addpoint=600,
                  idx=None,
                  is_crop=False):

    print("Crop flag is ", is_crop)

    with tf.variable_scope(scope, reuse=reuse) as sc:
        batch_size = point_cloud.get_shape()[0].value
        num_point = point_cloud.get_shape()[1].value
        l0_xyz = point_cloud[:, :, 0:3]
        if use_normal:
            l0_points = point_cloud[:, :, 3:]
        else:
            l0_points = None

        with tf.variable_scope('transform_net1') as sc:
            transform = input_transform_net(l0_xyz,
                                            is_training,
                                            bn_decay=None,
                                            K=3)
            l0_xyz = tf.matmul(l0_xyz, transform)

        # Layer 1
        l1_xyz, l1_points, l1_indices = pointnet_sa_module(
            l0_xyz,
            l0_points,
            npoint=num_point,
            radius=bradius * 0.1,
            bn=use_bn,
            ibn=use_ibn,
            nsample=12,
            mlp=[32, 32, 64],
            mlp2=None,
            group_all=False,
            is_training=is_training,
            bn_decay=bn_decay,
            scope='layer1')

        l2_xyz, l2_points, l2_indices = pointnet_sa_module(
            l1_xyz,
            l1_points,
            npoint=num_point // 2,
            radius=bradius * 0.2,
            bn=use_bn,
            ibn=use_ibn,
            nsample=32,
            mlp=[64, 64, 128],
            mlp2=None,
            group_all=False,
            is_training=is_training,
            bn_decay=bn_decay,
            scope='layer2')

        l3_xyz, l3_points, l3_indices = pointnet_sa_module(
            l2_xyz,
            l2_points,
            npoint=num_point // 4,
            radius=bradius * 0.4,
            bn=use_bn,
            ibn=use_ibn,
            nsample=32,
            mlp=[128, 128, 256],
            mlp2=None,
            group_all=False,
            is_training=is_training,
            bn_decay=bn_decay,
            scope='layer3')

        l4_xyz, l4_points, l4_indices = pointnet_sa_module(
            l3_xyz,
            l3_points,
            npoint=num_point // 8,
            radius=bradius * 0.6,
            bn=use_bn,
            ibn=use_ibn,
            nsample=32,
            mlp=[256, 256, 512],
            mlp2=None,
            group_all=False,
            is_training=is_training,
            bn_decay=bn_decay,
            scope='layer4')

        # Feature Propagation layers
        if not is_training:
            l0_xyz = tf.gather_nd(l0_xyz, idx[:, :int(num_point * 1 / 8), :])
            l1_points = tf.gather_nd(l1_points,
                                     idx[:, :int(num_point * 1 / 8), :])
        elif is_crop:
            l0_xyz = tf.gather_nd(l0_xyz, idx[:, :int(num_point * 1 / 2), :])
            l1_points = tf.gather_nd(l1_points,
                                     idx[:, :int(num_point * 1 / 2), :])

        up_l4_points = pointnet_fp_module(l0_xyz,
                                          l4_xyz,
                                          None,
                                          l4_points, [64],
                                          is_training,
                                          bn_decay,
                                          scope='fa_layer1',
                                          bn=use_bn,
                                          ibn=use_ibn)

        up_l3_points = pointnet_fp_module(l0_xyz,
                                          l3_xyz,
                                          None,
                                          l3_points, [64],
                                          is_training,
                                          bn_decay,
                                          scope='fa_layer2',
                                          bn=use_bn,
                                          ibn=use_ibn)

        up_l2_points = pointnet_fp_module(l0_xyz,
                                          l2_xyz,
                                          None,
                                          l2_points, [64],
                                          is_training,
                                          bn_decay,
                                          scope='fa_layer3',
                                          bn=use_bn,
                                          ibn=use_ibn)

        feat = tf.concat([up_l4_points, up_l3_points, up_l2_points, l1_points],
                         axis=-1)
        feat = tf.expand_dims(feat, axis=2)

        # branch1: feature expansion for the newly generated points
        with tf.variable_scope('up_layer', reuse=reuse):
            up_feat_list = []
            for i in range(up_ratio):
                up_feat = tf_util2.conv2d(feat,
                                          256, [1, 1],
                                          padding='VALID',
                                          stride=[1, 1],
                                          bn=False,
                                          is_training=is_training,
                                          scope='conv1_%d' % (i),
                                          bn_decay=bn_decay)

                up_feat = tf_util2.conv2d(up_feat,
                                          128, [1, 1],
                                          padding='VALID',
                                          stride=[1, 1],
                                          bn=use_bn,
                                          is_training=is_training,
                                          scope='conv2_%d' % (i),
                                          bn_decay=bn_decay)
                up_feat_list.append(up_feat)
        up_feat = tf.concat(up_feat_list, axis=1)
        dist_feat = tf_util2.conv2d(up_feat,
                                    64, [1, 1],
                                    padding='VALID',
                                    stride=[1, 1],
                                    bn=False,
                                    is_training=is_training,
                                    scope='dist_fc1',
                                    bn_decay=bn_decay,
                                    weight_decay=0.0)
        dist = tf_util2.conv2d(dist_feat,
                               1, [1, 1],
                               padding='VALID',
                               stride=[1, 1],
                               bn=False,
                               is_training=is_training,
                               scope='dist_fc2',
                               bn_decay=bn_decay,
                               activation_fn=None,
                               weight_decay=0.0)
        dist = tf.squeeze(dist, axis=[2, 3])

        # branch2: regress the residual coordinates of the new points
        combined_feat = tf.concat((up_feat, dist_feat), axis=-1)
        coord_feat = tf_util2.conv2d(combined_feat,
                                     64, [1, 1],
                                     padding='VALID',
                                     stride=[1, 1],
                                     bn=False,
                                     is_training=is_training,
                                     scope='coord_fc1',
                                     bn_decay=bn_decay,
                                     weight_decay=0.0)
        r_coord = tf_util2.conv2d(coord_feat,
                                  3, [1, 1],
                                  padding='VALID',
                                  stride=[1, 1],
                                  bn=False,
                                  is_training=is_training,
                                  scope='coord_fc2',
                                  bn_decay=bn_decay,
                                  activation_fn=None,
                                  weight_decay=0.0)
        coord = tf.squeeze(r_coord, [2]) + tf.tile(l0_xyz[:, :, 0:3],
                                                   (1, up_ratio, 1))

        # prune the points according to the predicted edge distance (how to prune them better? use it as guidance?)
        # poolsize = int(num_addpoint * 1.2)
        # val,idx1 = tf.nn.top_k(-dist,poolsize)
        # tmp_idx0 = tf.tile(tf.reshape(tf.range(batch_size),(batch_size,1)),(1,num_addpoint))
        # tmp_idx1 = tf.random_uniform((batch_size,num_addpoint),0,poolsize,tf.int32)
        # idx1 = tf.gather_nd(idx1,tf.stack([tmp_idx0,tmp_idx1],axis=-1))
        edge_dist, idx1 = tf.nn.top_k(-dist, num_addpoint)
        idx0 = tf.tile(tf.reshape(tf.range(batch_size), (batch_size, 1)),
                       (1, num_addpoint))
        idx = tf.stack([idx0, idx1], axis=-1)

    return dist, coord, idx, None
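
A sketch of how the four return values might be consumed downstream (an assumption, not shown in the snippet): new_idx indexes the num_addpoint generated points with the smallest predicted edge distance, so tf.gather_nd extracts those points and their distances. Here point_cloud is assumed to be an existing BxNx3 (or BxNx6 with normals) placeholder.

# Hedged usage sketch; is_training=True avoids the idx-based cropping branch.
dist, coord, new_idx, _ = get_gen_model(point_cloud, is_training=True,
                                        scope='generator', up_ratio=4,
                                        num_addpoint=600)
edge_points = tf.gather_nd(coord, new_idx)  # B x num_addpoint x 3
edge_dist = tf.gather_nd(dist, new_idx)     # B x num_addpoint
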
Esempio n. 27
0
def get_model(point_cloud, is_training, bn_decay=None):
  """ Classification PointNet, input is BxNx3, output Bx40 """
  batch_size = point_cloud.get_shape()[0].value
  num_point = point_cloud.get_shape()[1].value
  end_points = {}
  k = 20

  adj_matrix = tf_util.pairwise_distance(point_cloud)
  nn_idx = tf_util.knn(adj_matrix, k=k)
  edge_feature = tf_util.get_edge_feature(point_cloud, nn_idx=nn_idx, k=k)
  print(adj_matrix, nn_idx, edge_feature)  
  with tf.variable_scope('transform_net1') as sc:
    transform = input_transform_net(edge_feature, is_training, bn_decay, K=3)

  point_cloud_transformed = tf.matmul(point_cloud, transform)
  adj_matrix = tf_util.pairwise_distance(point_cloud_transformed)
  nn_idx = tf_util.knn(adj_matrix, k=k)
  edge_feature = tf_util.get_edge_feature(point_cloud_transformed, nn_idx=nn_idx, k=k)

  net = tf_util.conv2d(edge_feature, 64, [1,1],
                       padding='VALID', stride=[1,1],
                       bn=True, is_training=is_training,
                       scope='dgcnn1', bn_decay=bn_decay)
  net = tf.reduce_max(net, axis=-2, keep_dims=True)
  net1 = net

  adj_matrix = tf_util.pairwise_distance(net)
  nn_idx = tf_util.knn(adj_matrix, k=k)
  edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)

  net = tf_util.conv2d(edge_feature, 64, [1,1],
                       padding='VALID', stride=[1,1],
                       bn=True, is_training=is_training,
                       scope='dgcnn2', bn_decay=bn_decay)
  net = tf.reduce_max(net, axis=-2, keep_dims=True)
  net2 = net
 
  adj_matrix = tf_util.pairwise_distance(net)
  nn_idx = tf_util.knn(adj_matrix, k=k)
  edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)  

  net = tf_util.conv2d(edge_feature, 64, [1,1],
                       padding='VALID', stride=[1,1],
                       bn=True, is_training=is_training,
                       scope='dgcnn3', bn_decay=bn_decay)
  net = tf.reduce_max(net, axis=-2, keep_dims=True)
  net3 = net

  adj_matrix = tf_util.pairwise_distance(net)
  nn_idx = tf_util.knn(adj_matrix, k=k)
  edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)  
  
  net = tf_util.conv2d(edge_feature, 128, [1,1],
                       padding='VALID', stride=[1,1],
                       bn=True, is_training=is_training,
                       scope='dgcnn4', bn_decay=bn_decay)
  net = tf.reduce_max(net, axis=-2, keep_dims=True)
  net4 = net

  net = tf_util.conv2d(tf.concat([net1, net2, net3, net4], axis=-1), 1024, [1, 1], 
                       padding='VALID', stride=[1,1],
                       bn=True, is_training=is_training,
                       scope='agg', bn_decay=bn_decay)
 
  net = tf.reduce_max(net, axis=1, keep_dims=True) 

  # MLP on global point cloud vector
  end_points['post_max'] = net
  net = tf.reshape(net, [batch_size, -1]) 
  net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training,
                                scope='fc1', bn_decay=bn_decay)
  net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training,
                         scope='dp1')
  net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training,
                                scope='fc2', bn_decay=bn_decay)
  net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training,
                        scope='dp2')
  end_points['final'] = net
  net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')

  return net, end_points
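
The graph helpers pairwise_distance, knn and get_edge_feature are not reproduced in this listing. The sketch below shows what the standard DGCNN versions compute, as an assumption about this repo's tf_util; the real helpers also squeeze the singleton third dimension of the BxNx1xC activations before computing distances.

# Hedged stand-ins for the DGCNN-style helpers (assumed, not taken from this repo).
def pairwise_distance(point_cloud):
    """BxNxC points -> BxNxN matrix of squared Euclidean distances."""
    inner = -2 * tf.matmul(point_cloud, point_cloud, transpose_b=True)
    square = tf.reduce_sum(tf.square(point_cloud), axis=-1, keep_dims=True)
    return square + inner + tf.transpose(square, perm=[0, 2, 1])

def knn(adj_matrix, k=20):
    """Indices of the k nearest neighbours (smallest squared distances)."""
    _, nn_idx = tf.nn.top_k(-adj_matrix, k=k)
    return nn_idx

def get_edge_feature(point_cloud, nn_idx, k=20):
    """BxNxC points + BxNxk indices -> BxNxkx2C edge features [x_i, x_j - x_i]."""
    batch_size = point_cloud.get_shape()[0].value
    num_points = point_cloud.get_shape()[1].value
    num_dims = point_cloud.get_shape()[-1].value
    idx_base = tf.reshape(tf.range(batch_size) * num_points, (batch_size, 1, 1))
    flat = tf.reshape(point_cloud, (-1, num_dims))
    neighbors = tf.gather(flat, nn_idx + idx_base)                   # BxNxkxC
    central = tf.tile(tf.expand_dims(point_cloud, 2), (1, 1, k, 1))  # BxNxkxC
    return tf.concat([central, neighbors - central], axis=-1)
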
Esempio n. 28
0
def get_model(point_cloud, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}

    var1 = tf.random_normal([32, 1024, 1, 64],
                            mean=0.0,
                            stddev=1.0,
                            dtype=tf.float32)
    var1 = tf.contrib.layers.fully_connected(var1,
                                             64,
                                             activation_fn=tf.nn.relu)
    var1 = tf.contrib.layers.fully_connected(var1,
                                             64,
                                             activation_fn=tf.nn.relu)

    with tf.variable_scope('transform_net1') as sc:
        transform = input_transform_net(point_cloud,
                                        is_training,
                                        bn_decay,
                                        K=3)
        #transform.shape = (32,3,3)
    point_cloud_transformed = tf.matmul(point_cloud, transform)
    #input_image = tf.expand_dims(point_cloud_transformed, -1)

    rotate_matrix = rotate(k=3)
    input_image_rotate = tf.matmul(point_cloud_transformed, rotate_matrix)
    input_image_rotate = tf.expand_dims(input_image_rotate, -1)

    net = tf_util.conv2d(input_image_rotate,
                         64, [1, 3],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv1',
                         bn_decay=bn_decay)

    net = tf.concat([net, var1], axis=3)
    net = tf_util.conv2d(net,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv2',
                         bn_decay=bn_decay)
    #print(net.shape)
    with tf.variable_scope('transform_net2') as sc:
        transform = feature_transform_net(net, is_training, bn_decay, K=64)
    end_points['transform'] = transform
    net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), transform)
    net_transformed = tf.expand_dims(net_transformed, [2])

    #net = tf.add(net, var2)
    net = tf_util.conv2d(net_transformed,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv3',
                         bn_decay=bn_decay)

    #net = tf.add(net, var3)
    net = tf_util.conv2d(net,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv4',
                         bn_decay=bn_decay)

    #net = tf.add(net, var4)
    net = tf_util.conv2d(net,
                         1024, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv5',
                         bn_decay=bn_decay)
    #print(net.shape)

    # Symmetric function: max pooling
    net = tf_util.max_pool2d(net, [num_point, 1],
                             padding='VALID',
                             scope='maxpool')

    net = tf.reshape(net, [batch_size, -1])
    net = tf_util.fully_connected(net,
                                  512,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc1',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.7,
                          is_training=is_training,
                          scope='dp1')
    net = tf_util.fully_connected(net,
                                  256,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc2',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.7,
                          is_training=is_training,
                          scope='dp2')
    net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')

    return net, end_points
Esempio n. 29
0
def get_model(point_cloud,
              is_training,
              bn_decay=None,
              use_input_trans=True,
              use_feature_trans=True):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}

    if use_input_trans:
        with tf.variable_scope('transform_net1') as sc:
            transform = input_transform_net(point_cloud,
                                            is_training,
                                            bn_decay,
                                            K=3)
        point_cloud_transformed = tf.matmul(point_cloud, transform)
    else:
        point_cloud_transformed = point_cloud
    input_image = tf.expand_dims(point_cloud_transformed, -1)

    net = tf_util.conv2d(input_image,
                         64, [1, 3],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv1',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv2',
                         bn_decay=bn_decay)

    if use_feature_trans:
        with tf.variable_scope('transform_net2') as sc:
            transform = feature_transform_net(net, is_training, bn_decay, K=64)
        end_points['transform'] = transform
        net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), transform)
        net_transformed = tf.expand_dims(net_transformed, [2])
    else:
        net_transformed = net

    net = tf_util.conv2d(net_transformed,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv3',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv4',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         1024, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv5',
                         bn_decay=bn_decay)

    # Symmetric function: max pooling
    net = tf_util.max_pool2d(net, [num_point, 1],
                             padding='VALID',
                             scope='maxpool')

    net = tf.reshape(net, [batch_size, -1])

    # Retrained layers
    net = tf_util.fully_connected(net,
                                  512,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc1',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.7,
                          is_training=is_training,
                          scope='dp1')
    net = tf_util.fully_connected(net,
                                  256,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc2',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.7,
                          is_training=is_training,
                          scope='dp2')
    # net = tf_util.fully_connected(net, 128, bn=True, is_training=is_training,
    #                               scope='transfer/fc3', bn_decay=bn_decay)
    # net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training,
    #                       scope='transfer/dp3')
    # net = tf_util.fully_connected(net, 40, activation_fn=None, scope='transfer/fc4')

    return net, end_points
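
Because the final classification layers above are commented out, this variant returns the 256-dimensional penultimate feature. A hedged sketch of attaching a fresh head for transfer learning follows; the placeholder names, class count and checkpoint handling are illustrative assumptions.

# Hedged transfer-learning sketch on top of the 256-d feature returned above.
feat, end_points = get_model(pointclouds_pl, is_training_pl, bn_decay=bn_decay)
logits = tf_util.fully_connected(feat, num_new_classes, activation_fn=None,
                                 scope='transfer/fc3')
loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels_pl,
                                                   logits=logits))
# Restore only the pretrained backbone, leaving the new 'transfer/*' head untouched.
backbone_vars = [v for v in tf.global_variables()
                 if not v.name.startswith('transfer/')]
saver = tf.train.Saver(backbone_vars)
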
Esempio n. 30
0
def get_model(point_cloud, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    k = 20

    adj_matrix = tf_util.pairwise_distance(point_cloud)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(point_cloud, nn_idx=nn_idx, k=k)

    with tf.variable_scope('transform_net1') as sc:
        transform = input_transform_net(edge_feature,
                                        is_training,
                                        bn_decay,
                                        K=3)

    point_cloud_transformed = tf.matmul(point_cloud, transform)
    input_image = tf.expand_dims(point_cloud_transformed, -1)

    adj_matrix = tf_util.pairwise_distance(point_cloud_transformed)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(point_cloud_transformed,
                                            nn_idx=nn_idx,
                                            k=k)

    # addition of transform layers
    net = tf_util.conv2d(input_image,
                         64, [1, 3],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv1',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv2',
                         bn_decay=bn_decay)

    with tf.variable_scope('transform_net2') as sc:
        transform = feature_transform_net(net, is_training, bn_decay, K=64)
        end_points['transform'] = transform
        net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), transform)
        net_transformed = tf.expand_dims(net_transformed, [2])

    net = tf_util.conv2d(edge_feature,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='dftnet1',
                         bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net1 = net

    adj_matrix = tf_util.pairwise_distance(net)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)

    net = tf_util.conv2d(edge_feature,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='dftnet2',
                         bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net2 = net

    adj_matrix = tf_util.pairwise_distance(net)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)

    net = tf_util.conv2d(edge_feature,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='dftnet3',
                         bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net3 = net

    adj_matrix = tf_util.pairwise_distance(net)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)

    net = tf_util.conv2d(edge_feature,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='dftnet4',
                         bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net4 = net

    adj_matrix = tf_util.pairwise_distance(net)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)

    net = tf_util.conv2d(edge_feature,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='dftnet5',
                         bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net5 = net

    adj_matrix = tf_util.pairwise_distance(net)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)

    net = tf_util.conv2d(edge_feature,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='dftnet6',
                         bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net6 = net

    adj_matrix = tf_util.pairwise_distance(net)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)

    net = tf_util.conv2d(edge_feature,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='dftnet7',
                         bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net7 = net

    adj_matrix = tf_util.pairwise_distance(net)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)

    net = tf_util.conv2d(edge_feature,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='dftnet8',
                         bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net8 = net

    net = tf_util.conv2d(tf.concat(
        [net1, net2, net3, net4, net5, net6, net7, net8], axis=-1),
                         1024, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='agg',
                         bn_decay=bn_decay)

    net = tf.reduce_max(net, axis=1, keep_dims=True)

    net = tf.reshape(net, [batch_size, -1])
    net = tf_util.fully_connected(net,
                                  512,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc1',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.5,
                          is_training=is_training,
                          scope='dp1')

    net = tf_util.fully_connected(net,
                                  256,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc2',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.5,
                          is_training=is_training,
                          scope='dp2')

    net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')

    return net, end_points
Esempio n. 31
0
def pc_encoder(point_cloud, nsamples, is_training, bn_decay=None):
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    point_dim = point_cloud.get_shape()[2].value
    with tf.variable_scope('transform_net1') as sc:
        transform = input_transform_net(point_cloud,
                                        is_training,
                                        bn_decay,
                                        K=3)
    point_cloud_transformed = tf.matmul(point_cloud, transform)

    point_cloud_transformed = tf.expand_dims(point_cloud_transformed, -1)
    nn_dis, idx_batch = tf_util.get_knn(point_cloud, 12)

    # Encoder
    net = tf_util.conv2d(point_cloud_transformed,
                         64, [1, point_dim],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv1',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv2',
                         bn_decay=bn_decay)
    point_feat_1 = tf_util.conv2d(net,
                                  128, [1, 1],
                                  padding='VALID',
                                  stride=[1, 1],
                                  bn=True,
                                  is_training=is_training,
                                  scope='conv3',
                                  bn_decay=bn_decay)

    print('------------ convPN_1 ------------')
    point_feat = tf_util.conv2d(point_feat_1,
                                256, [1, 1],
                                padding='VALID',
                                stride=[1, 1],
                                bn=True,
                                is_training=is_training,
                                scope='conv4',
                                bn_decay=bn_decay)
    point_feat = tf_util.conv2d(point_feat,
                                256, [1, 1],
                                padding='VALID',
                                stride=[1, 1],
                                bn=True,
                                is_training=is_training,
                                scope='conv5',
                                bn_decay=bn_decay)
    feature = tf.squeeze(point_feat, squeeze_dims=2)
    knn_feat = tf_util.cuda_maxpooling(feature, idx_batch)
    knn_feat = tf.expand_dims(knn_feat, axis=2)
    point_feat_2 = tf.concat([point_feat, knn_feat], axis=-1)  # 32 256 1 256

    print('------------ convPN_2 ------------')
    point_feat = tf_util.conv2d(point_feat_2,
                                256, [1, 1],
                                padding='VALID',
                                stride=[1, 1],
                                bn=True,
                                is_training=is_training,
                                scope='conv6',
                                bn_decay=bn_decay)
    point_feat = tf_util.conv2d(point_feat,
                                256, [1, 1],
                                padding='VALID',
                                stride=[1, 1],
                                bn=True,
                                is_training=is_training,
                                scope='conv7',
                                bn_decay=bn_decay)
    feature = tf.squeeze(point_feat, squeeze_dims=2)
    knn_feat = tf_util.cuda_maxpooling(feature, idx_batch)
    knn_feat = tf.expand_dims(knn_feat, axis=2)
    point_feat_3 = tf.concat([point_feat, knn_feat], axis=-1)  # 32 256 1 512
    mix_feature = tf.concat([point_feat_1, point_feat_2, point_feat_3],
                            axis=-1)

    # ----------- maxpooling--------------
    global_feature = tf_util.max_pool2d(mix_feature, [num_point, 1],
                                        padding='VALID',
                                        scope='maxpool_1')
    net = tf.reshape(global_feature, [batch_size, -1])
    net = tf_util.fully_connected(net,
                                  1024,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc00',
                                  bn_decay=bn_decay)
    net = tf_util.fully_connected(net,
                                  1024,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc01',
                                  bn_decay=bn_decay)
    net = tf_util.fully_connected(net,
                                  1024,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc02',
                                  bn_decay=bn_decay)
    net = tf.reshape(net, [batch_size, -1])
    return net
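
tf_util.get_knn and tf_util.cuda_maxpooling are custom ops that are not shown here; from the shapes above they appear to look up each point's 12 nearest neighbours and max-pool its features over that neighbourhood. The sketch below is a plain-TF stand-in built on that assumption, not the actual CUDA implementation.

# Hedged plain-TF stand-in for the assumed neighbourhood max-pooling semantics.
def neighbor_max_pool(feature, idx):
    """feature: BxNxC, idx: BxNxk neighbour indices -> BxNxC pooled features."""
    batch_size = feature.get_shape()[0].value
    num_point = idx.get_shape()[1].value
    k = idx.get_shape()[2].value
    batch_idx = tf.tile(tf.reshape(tf.range(batch_size), (batch_size, 1, 1)),
                        (1, num_point, k))
    gather_idx = tf.stack([batch_idx, idx], axis=-1)    # BxNxkx2
    neighbor_feat = tf.gather_nd(feature, gather_idx)   # BxNxkxC
    return tf.reduce_max(neighbor_feat, axis=2)         # BxNxC
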
Esempio n. 32
0
def get_model_w_ae(ae, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = ae.x_reconstr.get_shape()[0].value
    num_point = ae.x_reconstr.get_shape()[1].value
    end_points = {}

    with tf.variable_scope('transform_net1') as sc:
        transform = input_transform_net(ae.x_reconstr,
                                        is_training,
                                        bn_decay,
                                        K=3)
    point_cloud_transformed = tf.matmul(ae.x_reconstr, transform)
    input_image = tf.expand_dims(point_cloud_transformed, -1)

    net = tf_util.conv2d(input_image,
                         64, [1, 3],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv1',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv2',
                         bn_decay=bn_decay)

    with tf.variable_scope('transform_net2') as sc:
        transform = feature_transform_net(net, is_training, bn_decay, K=64)
    end_points['transform'] = transform
    net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), transform)
    net_transformed = tf.expand_dims(net_transformed, [2])

    net = tf_util.conv2d(net_transformed,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv3',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv4',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         1024, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv5',
                         bn_decay=bn_decay)

    #print("before maxpool")
    #print(net.get_shape())
    end_points['pre_max'] = net
    # Symmetric function: max pooling
    net = tf_util.max_pool2d(net, [num_point, 1],
                             padding='VALID',
                             scope='maxpool')
    end_points['post_max'] = net
    #print("after maxpool")
    #print(net.get_shape())
    net = tf.reshape(net, [batch_size, -1])
    #print("after reshape")
    #print(net.get_shape())
    net = tf_util.fully_connected(net,
                                  512,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc1',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.7,
                          is_training=is_training,
                          scope='dp1')
    net = tf_util.fully_connected(net,
                                  256,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc2',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.7,
                          is_training=is_training,
                          scope='dp2')
    net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')

    #print(end_points['pre_max'].get_shape())
    return net, end_points
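
The function above classifies the reconstruction produced by an autoencoder object. A minimal wiring sketch, assuming only that ae exposes an x_reconstr tensor of shape BxNx3 (the stand-in class and shapes are illustrative):

# Hedged usage sketch; FakeAE only mimics the single attribute used above.
import collections
FakeAE = collections.namedtuple('FakeAE', ['x_reconstr'])
ae = FakeAE(x_reconstr=tf.placeholder(tf.float32, shape=(32, 1024, 3)))
is_training_pl = tf.placeholder(tf.bool, shape=())
logits, end_points = get_model_w_ae(ae, is_training_pl)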