Example 1
def pointNet(point_cloud, output_dim, is_training, use_bn=False, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output is B x output_dim (L2-normalized) """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}

    with tf.variable_scope('transform_net1') as sc:
        transform = input_transform_net(point_cloud, is_training, use_bn, bn_decay, K=3)
    point_cloud_transformed = tf.matmul(point_cloud, transform)
    input_image = tf.expand_dims(point_cloud_transformed, -1)

    net = tf_util.conv2d(input_image, 64, [1,3],
                         padding='VALID', stride=[1,1],
                         bn=use_bn, is_training=is_training,
                         scope='conv1', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=use_bn, is_training=is_training,
                         scope='conv2', bn_decay=bn_decay)

    with tf.variable_scope('transform_net2') as sc:
        transform = feature_transform_net(net, is_training, use_bn, bn_decay, K=64)
    end_points['transform'] = transform
    net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), transform)
    net_transformed = tf.expand_dims(net_transformed, [2])

    net = tf_util.conv2d(net_transformed, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=use_bn, is_training=is_training,
                         scope='conv3', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=use_bn, is_training=is_training,
                         scope='conv4', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 1024, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=use_bn, is_training=is_training,
                         scope='conv5', bn_decay=bn_decay)

    # Symmetric function: max pooling
    net = tf_util.max_pool2d(net, [num_point,1],
                             padding='VALID', scope='maxpool')

    net = tf.reshape(net, [batch_size, -1])
    net = tf_util.fully_connected(net, 512, bn=use_bn, is_training=is_training,
                                  scope='fc1', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training,
                          scope='dp1')
    net = tf_util.fully_connected(net, 256, bn=use_bn, is_training=is_training,
                                  scope='fc2', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training,
                          scope='dp2')
    net = tf_util.fully_connected(net, output_dim, activation_fn=None, scope='fc3')
    
    net = tf.nn.l2_normalize(net, dim=1)

    return net, end_points
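
A minimal usage sketch for this function, assuming TensorFlow 1.x and that the code above (together with its tf_util and transform-net dependencies) is importable; the module name pointnet_embed is hypothetical.

import numpy as np
import tensorflow as tf

from pointnet_embed import pointNet  # hypothetical module name for the code above

BATCH_SIZE, NUM_POINT, EMBED_DIM = 8, 1024, 128

points_pl = tf.placeholder(tf.float32, shape=(BATCH_SIZE, NUM_POINT, 3))
is_training_pl = tf.placeholder(tf.bool, shape=())

# The returned embedding is L2-normalized, so the dot product of two rows is
# their cosine similarity.
embedding, end_points = pointNet(points_pl, EMBED_DIM, is_training_pl, use_bn=False)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    emb = sess.run(embedding, feed_dict={
        points_pl: np.random.rand(BATCH_SIZE, NUM_POINT, 3),
        is_training_pl: False})
    print(emb.shape)  # (8, 128)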
    
Example 2
def get_model(point_cloud, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}

    with tf.variable_scope('transform_net1') as sc:
        transform = input_transform_net(point_cloud, is_training, bn_decay, K=6)
    point_cloud_transformed = tf.matmul(point_cloud, transform)
    input_image = tf.expand_dims(point_cloud_transformed, -1)
    net = tf_util.conv2d(input_image, 64, [1,6],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv1', bn_decay=bn_decay)

    net = tf_util.conv2d(net, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv2', bn_decay=bn_decay)
    print("net = ", net.shape)
    with tf.variable_scope('transform_net2') as sc:
        transform = feature_transform_net(net, is_training, bn_decay, K=64)
    end_points['transform'] = transform
    net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), transform)
    net_transformed = tf.expand_dims(net_transformed, [2])

    net = tf_util.conv2d(net_transformed, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv3', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv4', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 1024, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv5', bn_decay=bn_decay)

    # Symmetric function: max pooling
    max_net = tf_util.max_pool2d(net, [num_point,1],
                             padding='VALID', scope='maxpool')
    avg_net = tf_util.avg_pool2d(net, [num_point,1],
                             padding='VALID', scope='avgpool')
    max_avg_net = tf.concat([max_net, avg_net], 3)

    net = tf.reshape(max_avg_net, [batch_size, -1])
    net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training,
                                  scope='fc1', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training,
                          scope='dp1')
    net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training,
                                  scope='fc2', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training,
                          scope='dp2')
    net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')

    return net, end_points
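
The only change from the standard classifier here is the symmetric function: max pooling and average pooling are concatenated, doubling the width of the global feature. Below is an illustrative sketch of that step using plain TensorFlow 1.x reductions (a stand-in for the tf_util pooling helpers, not the original code).

import tensorflow as tf

# Per-point features as produced by conv5 above: B x N x 1 x 1024.
per_point_feat = tf.placeholder(tf.float32, shape=(32, 1024, 1, 1024))

max_feat = tf.reduce_max(per_point_feat, axis=1, keepdims=True)   # B x 1 x 1 x 1024
avg_feat = tf.reduce_mean(per_point_feat, axis=1, keepdims=True)  # B x 1 x 1 x 1024

# Both reductions are invariant to the ordering of the N points; concatenating
# them along the channel axis yields a 2048-D global descriptor, which is the
# width that fc1 above actually receives.
global_feat = tf.concat([max_feat, avg_feat], axis=3)             # B x 1 x 1 x 2048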
Example 3
def forward(point_cloud, is_training, bn_decay=None):
    """PointNetVLAD,    INPUT is batch_num_queries X num_pointclouds_per_query X num_points_per_pointcloud X 3, 
                        OUTPUT batch_num_queries X num_pointclouds_per_query X output_dim """
    batch_num_queries = point_cloud.get_shape()[0].value
    num_pointclouds_per_query = point_cloud.get_shape()[1].value
    num_points = point_cloud.get_shape()[2].value
    CLUSTER_SIZE=64
    OUTPUT_DIM=256
    point_cloud = tf.reshape(point_cloud, [batch_num_queries*num_pointclouds_per_query, num_points,3])

    with tf.variable_scope('transform_net1') as sc:
        input_transform = input_transform_net(point_cloud, is_training, bn_decay, K=3)
    point_cloud_transformed = tf.matmul(point_cloud, input_transform)
    input_image = tf.expand_dims(point_cloud_transformed, -1)

    net = tf_util.conv2d(input_image, 64, [1,3],
                         padding='VALID', stride=[1,1],
                         is_training=is_training,
                         scope='conv1', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         is_training=is_training,
                         scope='conv2', bn_decay=bn_decay)

    with tf.variable_scope('transform_net2') as sc:
        feature_transform = feature_transform_net(net, is_training, bn_decay, K=64)
    net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), feature_transform)
    net_transformed = tf.expand_dims(net_transformed, [2])

    net = tf_util.conv2d(net_transformed, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         is_training=is_training,
                         scope='conv3', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         is_training=is_training,
                         scope='conv4', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 1024, [1,1],
                         padding='VALID', stride=[1,1],
                         is_training=is_training,
                         scope='conv5', bn_decay=bn_decay)

    NetVLAD = lp.NetVLAD(feature_size=1024, max_samples=num_points, cluster_size=CLUSTER_SIZE, 
                    output_dim=OUTPUT_DIM, gating=True, add_batch_norm=True,
                    is_training=is_training)

    net = tf.reshape(net, [-1, 1024])
    net = tf.nn.l2_normalize(net, 1)
    output = NetVLAD.forward(net)
    print(output)

    # normalize to have norm 1
    output = tf.nn.l2_normalize(output, 1)
    output = tf.reshape(output, [batch_num_queries, num_pointclouds_per_query, OUTPUT_DIM])

    return output
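
A usage sketch for the tuple-shaped input this function expects, assuming TensorFlow 1.x and that the file above (with its tf_util, transform-net, and NetVLAD `lp` dependencies) is importable; the module name pointnetvlad_cls is hypothetical.

import tensorflow as tf

from pointnetvlad_cls import forward  # hypothetical module name for the code above

BATCH_QUERIES, CLOUDS_PER_QUERY, NUM_POINTS = 2, 3, 4096

query_pl = tf.placeholder(tf.float32,
                          shape=(BATCH_QUERIES, CLOUDS_PER_QUERY, NUM_POINTS, 3))
is_training_pl = tf.placeholder(tf.bool, shape=())

# Each of the B x Q clouds is embedded independently into a unit-norm 256-D
# NetVLAD descriptor, then reshaped back to B x Q x 256.
descriptors = forward(query_pl, is_training_pl)
print(descriptors.get_shape())  # (2, 3, 256)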
Example 4
def get_model(point_cloud, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}

    with tf.variable_scope('transform_net1') as sc:
        transform = input_transform_net(point_cloud, is_training, bn_decay, K=3)
    point_cloud_transformed = tf.matmul(point_cloud, transform)
    input_image = tf.expand_dims(point_cloud_transformed, -1)

    net = tf_util.conv2d(input_image, 64, [1,3],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv1', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv2', bn_decay=bn_decay)

    with tf.variable_scope('transform_net2') as sc:
        transform = feature_transform_net(net, is_training, bn_decay, K=64)
    end_points['transform'] = transform
    net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), transform)
    net_transformed = tf.expand_dims(net_transformed, [2])

    net = tf_util.conv2d(net_transformed, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv3', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv4', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 1024, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv5', bn_decay=bn_decay)

    # Symmetric function: max pooling
    net = tf_util.max_pool2d(net, [num_point,1],
                             padding='VALID', scope='maxpool')

    net = tf.reshape(net, [batch_size, -1])
    net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training,
                                  scope='fc1', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training,
                          scope='dp1')
    net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training,
                                  scope='fc2', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training,
                          scope='dp2')
    net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')

    return net, end_points
Example 5
def feature_extractor(point_cloud, feature_length, is_training, bn_decay=None):
    """ extract fix length feature from a point cloud, input is BxNx3, output B x feature_length """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}

    with tf.variable_scope('transform_net1') as sc:
        transform = input_transform_net(point_cloud, is_training, bn_decay, K=3)
    point_cloud_transformed = tf.matmul(point_cloud, transform)
    input_image = tf.expand_dims(point_cloud_transformed, -1)

    net = tf_util.conv2d(input_image, 64, [1,3],
                         padding='VALID', stride=[1,1],
                         bn=False, is_training=is_training,
                         scope='conv1', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=False, is_training=is_training,
                         scope='conv2', bn_decay=bn_decay)

    with tf.variable_scope('transform_net2') as sc:
        transform = feature_transform_net(net, is_training, bn_decay, K=64)
    end_points['transform'] = transform
    net_transformed = tf.matmul(tf.squeeze(net, [2]), transform)
    net_transformed = tf.expand_dims(net_transformed, [2])

    net = tf_util.conv2d(net_transformed, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=False, is_training=is_training,
                         scope='conv3', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 256, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=False, is_training=is_training,
                         scope='conv4', bn_decay=bn_decay)
    net = tf_util.conv2d(net, feature_length, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=False, is_training=is_training,
                         scope='conv5', bn_decay=bn_decay)

    # Symmetric function: max pooling
    net = tf_util.max_pool2d(net, [num_point,1],
                             padding='VALID', scope='maxpool')

    # output feature size = B x feature_length
    feature = tf.reshape(net, [batch_size, -1])

    return feature, end_points
Example 6
def get_model_fine_tuning_evaluate(point_cloud,
                                  point_coords_in_voxels,
                                  num_scale,
                                  is_training,
                                  num_classes,
                                  bn_decay=None):
    """ Classification PointNet, input is BxNx3, output BxNx50 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}

    with tf.variable_scope('transform_net1') as sc:
        transform = input_transform_net(point_cloud,
                                        is_training,
                                        bn_decay,
                                        K=3)
    point_cloud_transformed = tf.matmul(point_cloud, transform)
    input_image = tf.expand_dims(point_cloud_transformed, -1)

    net = tf_util.conv2d(input_image,
                         64, [1, 3],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv1',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv2',
                         bn_decay=bn_decay)

    with tf.variable_scope('transform_net2') as sc:
        transform = feature_transform_net(net, is_training, bn_decay, K=64)
    end_points['transform'] = transform
    net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), transform)
    point_feat = tf.expand_dims(net_transformed, [2])
    print(point_feat)

    net = tf_util.conv2d(point_feat,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv3',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv4',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv5',
                         bn_decay=bn_decay)

    # PYRAMID START #
    # m x n x 128
    net = tf.squeeze(net, [2])
    print(net)

    # m x n x (4 x 128 = 512)
    points_feat1_concat = pyramid_nets.pyramid_convert_layer(
        net,
        point_coords_in_voxels,
        num_scale, [256],
        "Pyramid_1",
        bn=True,
        is_training=is_training,
        bn_decay=bn_decay)
    print(points_feat1_concat)

    # m x n x 1 x 512
    points_feat1_concat = tf.expand_dims(points_feat1_concat, [2])

    # Concat pyramid global and local features
    net = tf.expand_dims(net, [2])
    point_feat_concat = tf.concat(axis=3, values=[net, points_feat1_concat])
    # PYRAMID END #

    net = tf_util.conv2d(point_feat_concat,
                         256, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv6_pyramid',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         256, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv7_pyramid',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv8_pyramid',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv9_pyramid',
                         bn_decay=bn_decay)

    net = tf_util.conv2d(net,
                         num_classes, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         activation_fn=None,
                         scope='conv10_pyramid')
    net = tf.squeeze(net, [2])  # BxNxC

    return net, end_points

Example 7
def get_model(point_cloud,
              is_training,
              bn_decay=None,
              use_input_trans=True,
              use_feature_trans=True):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}

    if use_input_trans:
        with tf.variable_scope('transform_net1') as sc:
            transform = input_transform_net(point_cloud,
                                            is_training,
                                            bn_decay,
                                            K=3)
        point_cloud_transformed = tf.matmul(point_cloud, transform)
    else:
        point_cloud_transformed = point_cloud
    input_image = tf.expand_dims(point_cloud_transformed, -1)

    net = tf_util.conv2d(input_image,
                         64, [1, 3],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv1',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv2',
                         bn_decay=bn_decay)

    if use_feature_trans:
        with tf.variable_scope('transform_net2') as sc:
            transform = feature_transform_net(net, is_training, bn_decay, K=64)
        end_points['transform'] = transform
        net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), transform)
        net_transformed = tf.expand_dims(net_transformed, [2])
    else:
        net_transformed = net

    net = tf_util.conv2d(net_transformed,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv3',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv4',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         1024, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv5',
                         bn_decay=bn_decay)

    # Symmetric function: max pooling
    net = tf_util.max_pool2d(net, [num_point, 1],
                             padding='VALID',
                             scope='maxpool')

    net = tf.reshape(net, [batch_size, -1])

    # Retrained layers
    net = tf_util.fully_connected(net,
                                  512,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc1',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.7,
                          is_training=is_training,
                          scope='dp1')
    net = tf_util.fully_connected(net,
                                  256,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc2',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.7,
                          is_training=is_training,
                          scope='dp2')
    # net = tf_util.fully_connected(net, 128, bn=True, is_training=is_training,
    #                               scope='transfer/fc3', bn_decay=bn_decay)
    # net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training,
    #                       scope='transfer/dp3')
    # net = tf_util.fully_connected(net, 40, activation_fn=None, scope='transfer/fc4')

    return net, end_points
Example 8
def get_model(point_cloud, query_points, is_training, bn_decay=None):
    """ range regression PointNet, input is BxNx3(point_cloud) and Bx2(query_points), output Bx1 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}

    #
    query_points = tf.tile(
        tf.expand_dims(query_points, [1]),
        [1, num_point, 1])  # Now, query_points is with shape BxNx2
    query_points = tf.concat(
        [query_points, tf.zeros([batch_size, num_point, 1])],
        2)  # Now, query_points is with shape BxNx3
    # point_cloud_ab = tf.slice(point_cloud, [0, 0, 0], [-1, -1, 2]) # alpha and beta, shape:BxNx2
    # point_cloud_range = tf.slice(point_cloud, [0, 0, 2], [-1, -1, 1]) # range, shape:BxNx1
    # shift_ab = point_cloud_ab - query_points
    shifted_pl = point_cloud - query_points
    #

    with tf.variable_scope('transform_net1') as sc:
        transform = input_transform_net(shifted_pl, is_training, bn_decay, K=3)
    point_cloud_transformed = tf.matmul(shifted_pl, transform)
    input_image = tf.expand_dims(point_cloud_transformed, -1)

    net = tf_util.conv2d(input_image,
                         64, [1, 3],
                         padding='VALID',
                         stride=[1, 1],
                         bn=False,
                         is_training=is_training,
                         scope='conv1',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=False,
                         is_training=is_training,
                         scope='conv2',
                         bn_decay=bn_decay)

    with tf.variable_scope('transform_net2') as sc:
        transform = feature_transform_net(net, is_training, bn_decay, K=64)
    end_points['transform'] = transform
    net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), transform)
    net_transformed = tf.expand_dims(net_transformed, [2])

    net = tf_util.conv2d(net_transformed,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=False,
                         is_training=is_training,
                         scope='conv3',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=False,
                         is_training=is_training,
                         scope='conv4',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         1024, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=False,
                         is_training=is_training,
                         scope='conv5',
                         bn_decay=bn_decay)

    # Symmetric function: average pooling
    net = tf_util.avg_pool2d(net, [num_point, 1],
                             padding='VALID',
                             scope='avgpool')

    net = tf.reshape(net, [batch_size, -1])
    net = tf_util.fully_connected(net,
                                  512,
                                  bn=False,
                                  is_training=is_training,
                                  scope='fc1',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.7,
                          is_training=is_training,
                          scope='dp1')
    net = tf_util.fully_connected(net,
                                  256,
                                  bn=False,
                                  is_training=is_training,
                                  scope='fc2',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.7,
                          is_training=is_training,
                          scope='dp2')
    net = tf_util.fully_connected(net, 1, activation_fn=None, scope='fc3')

    return net, end_points
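
The recentering at the top of this function is easiest to see in isolation: each 2-D query point is tiled across the N points, padded with a zero third coordinate, and subtracted from the cloud. A NumPy sketch of just that step:

import numpy as np

B, N = 2, 5
point_cloud = np.random.rand(B, N, 3)
query_points = np.random.rand(B, 2)

tiled = np.tile(query_points[:, None, :], (1, N, 1))           # B x N x 2
tiled = np.concatenate([tiled, np.zeros((B, N, 1))], axis=2)   # B x N x 3
shifted_pl = point_cloud - tiled                               # cloud expressed relative to the query

assert shifted_pl.shape == (B, N, 3)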
Example 9
def get_model_fine_tuning_test(point_cloud,
                               point_coords_in_voxels,
                               num_scale,
                               is_training,
                               bn_decay=None):
    """ ConvNet baseline, input is BxNx3 gray image """
    end_points = {}
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value

    with tf.variable_scope('transform_net1') as sc:
        transform = input_transform_net(point_cloud,
                                        is_training,
                                        bn_decay,
                                        K=3)
    point_cloud_transformed = tf.matmul(point_cloud, transform)
    input_image = tf.expand_dims(point_cloud_transformed, -1)

    net = tf_util.conv2d(input_image,
                         64, [1, 3],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv1',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv2',
                         bn_decay=bn_decay)

    with tf.variable_scope('transform_net2') as sc:
        transform = feature_transform_net(net, is_training, bn_decay, K=64)
    end_points['transform'] = transform
    net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), transform)
    net_transformed = tf.expand_dims(net_transformed, [2])

    net = tf_util.conv2d(net_transformed,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv3',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv4',
                         bn_decay=bn_decay)
    points_feat1 = tf_util.conv2d(net,
                                  1024, [1, 1],
                                  padding='VALID',
                                  stride=[1, 1],
                                  bn=True,
                                  is_training=is_training,
                                  scope='conv5',
                                  bn_decay=bn_decay)
    # PYRAMID START #
    # m x n x 1024
    points_feat1 = tf.squeeze(points_feat1, [2])
    print(points_feat1)

    # m x n x (4 x 128 = 512)
    points_feat1_concat = pyramid_nets.pyramid_convert_layer(
        points_feat1,
        point_coords_in_voxels,
        num_scale, [256],
        bn=True,
        is_training=is_training,
        bn_decay=bn_decay)
    print(points_feat1_concat)

    # m x n x 1 x 512
    points_feat1_concat = tf.expand_dims(points_feat1_concat, [2])

    # Concat pyramid global and local features
    points_feat1 = tf.expand_dims(points_feat1, [2])
    point_feat_concat = tf.concat(axis=3,
                                  values=[points_feat1, points_feat1_concat])
    # PYRAMID END #

    # Symmetric function: average pooling (max pooling kept commented out below)
    #net = tf_util.max_pool2d(point_feat_concat, [num_point,1], padding='VALID', scope='pyramid_maxpool')
    net = tf_util.avg_pool2d(point_feat_concat, [num_point, 1],
                             padding='VALID',
                             scope='pyramid_maxpool')

    net = tf.reshape(net, [batch_size, -1])
    net = tf_util.fully_connected(net,
                                  512,
                                  bn=True,
                                  is_training=is_training,
                                  scope='pyramid_fc1',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.7,
                          is_training=is_training,
                          scope='pyramid_dp1')
    net = tf_util.fully_connected(net,
                                  256,
                                  bn=True,
                                  is_training=is_training,
                                  scope='pyramid_fc2',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.7,
                          is_training=is_training,
                          scope='pyramid_dp2')
    net = tf_util.fully_connected(net,
                                  40,
                                  activation_fn=None,
                                  scope='pyramid_fc3')

    return net, end_points
Example 10
def get_model(point_cloud, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output BxNx50 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}

    with tf.variable_scope('transform_net1') as sc:
        transform = input_transform_net(point_cloud, is_training, bn_decay, K=3)
    point_cloud_transformed = tf.matmul(point_cloud, transform)
    input_image = tf.expand_dims(point_cloud_transformed, -1)

    net = tf_util.conv2d(input_image, 64, [1,3],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv1', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv2', bn_decay=bn_decay)

    with tf.variable_scope('transform_net2') as sc:
        transform = feature_transform_net(net, is_training, bn_decay, K=64)
    end_points['transform'] = transform
    net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), transform)
    point_feat = tf.expand_dims(net_transformed, [2])
    print(point_feat)

    net = tf_util.conv2d(point_feat, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv3', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv4', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 1024, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv5', bn_decay=bn_decay)
    global_feat = tf_util.max_pool2d(net, [num_point,1],
                                     padding='VALID', scope='maxpool')
    print(global_feat)

    global_feat_expand = tf.tile(global_feat, [1, num_point, 1, 1])
    concat_feat = tf.concat([point_feat, global_feat_expand], axis=3)
    print(concat_feat)

    net = tf_util.conv2d(concat_feat, 512, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv6', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 256, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv7', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv8', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv9', bn_decay=bn_decay)

    net = tf_util.conv2d(net, 50, [1,1],
                         padding='VALID', stride=[1,1], activation_fn=None,
                         scope='conv10')
    net = tf.squeeze(net, [2]) # BxNxC

    return net, end_points
Example 11
def get_model(point_cloud, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    k = 20

    adj_matrix = tf_util.pairwise_distance(point_cloud)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(point_cloud, nn_idx=nn_idx, k=k)

    with tf.variable_scope('transform_net1') as sc:
        transform = input_transform_net(edge_feature,
                                        is_training,
                                        bn_decay,
                                        K=3)

    point_cloud_transformed = tf.matmul(point_cloud, transform)
    input_image = tf.expand_dims(point_cloud_transformed, -1)

    adj_matrix = tf_util.pairwise_distance(point_cloud_transformed)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(point_cloud_transformed,
                                            nn_idx=nn_idx,
                                            k=k)

    # addition of transform layers
    net = tf_util.conv2d(input_image,
                         64, [1, 3],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv1',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv2',
                         bn_decay=bn_decay)

    with tf.variable_scope('transform_net2') as sc:
        transform = feature_transform_net(net, is_training, bn_decay, K=64)
        end_points['transform'] = transform
        net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), transform)
        net_transformed = tf.expand_dims(net_transformed, [2])

    net = tf_util.conv2d(edge_feature,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='dftnet1',
                         bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net1 = net

    adj_matrix = tf_util.pairwise_distance(net)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)

    net = tf_util.conv2d(edge_feature,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='dftnet2',
                         bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net2 = net

    adj_matrix = tf_util.pairwise_distance(net)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)

    net = tf_util.conv2d(edge_feature,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='dftnet3',
                         bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net3 = net

    adj_matrix = tf_util.pairwise_distance(net)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)

    net = tf_util.conv2d(edge_feature,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='dftnet4',
                         bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net4 = net

    adj_matrix = tf_util.pairwise_distance(net)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)

    net = tf_util.conv2d(edge_feature,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='dftnet5',
                         bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net5 = net

    adj_matrix = tf_util.pairwise_distance(net)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)

    net = tf_util.conv2d(edge_feature,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='dftnet6',
                         bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net6 = net

    adj_matrix = tf_util.pairwise_distance(net)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)

    net = tf_util.conv2d(edge_feature,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='dftnet7',
                         bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net7 = net

    adj_matrix = tf_util.pairwise_distance(net)
    nn_idx = tf_util.knn(adj_matrix, k=k)
    edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)

    net = tf_util.conv2d(edge_feature,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='dftnet8',
                         bn_decay=bn_decay)
    net = tf.reduce_max(net, axis=-2, keep_dims=True)
    net8 = net

    net = tf_util.conv2d(tf.concat(
        [net1, net2, net3, net4, net5, net6, net7, net8], axis=-1),
                         1024, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='agg',
                         bn_decay=bn_decay)

    net = tf.reduce_max(net, axis=1, keep_dims=True)

    net = tf.reshape(net, [batch_size, -1])
    net = tf_util.fully_connected(net,
                                  512,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc1',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.5,
                          is_training=is_training,
                          scope='dp1')

    net = tf_util.fully_connected(net,
                                  256,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc2',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.5,
                          is_training=is_training,
                          scope='dp2')

    net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')

    return net, end_points
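
Each edge-convolution block above repeats the same three calls: pairwise distances, kNN indices, then edge features. Below is a plain-TensorFlow sketch of the first two steps (an illustrative re-implementation of the usual DGCNN formulation, not the original tf_util helpers).

import tensorflow as tf

def pairwise_sq_dist(points):
    """points: B x N x C -> B x N x N matrix of squared Euclidean distances."""
    inner = tf.matmul(points, points, transpose_b=True)                  # B x N x N
    sq_norm = tf.reduce_sum(tf.square(points), axis=-1, keepdims=True)   # B x N x 1
    return sq_norm - 2.0 * inner + tf.transpose(sq_norm, perm=[0, 2, 1])

def knn_indices(points, k=20):
    """Indices of the k nearest neighbours of every point."""
    neg_dist = -pairwise_sq_dist(points)       # largest value = closest point
    _, nn_idx = tf.nn.top_k(neg_dist, k=k)     # B x N x k
    return nn_idx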

Example 12
def get_model(point_cloud, query_points, is_training, bn_decay=None):
    """ range regression PointNet, input is BxNx3(point_cloud) and Bx2(query_points), output Bx1 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}

    #
    query_points = tf.tile(tf.expand_dims(query_points, [1]), [1, num_point, 1]) # Now, query_points is with shape BxNx2
    query_points = tf.concat([query_points, tf.zeros([batch_size, num_point, 1])], 2) # Now, query_points is with shape BxNx3
    # point_cloud_ab = tf.slice(point_cloud, [0, 0, 0], [-1, -1, 2]) # alpha and beta, shape:BxNx2
    point_cloud_range = tf.slice(point_cloud, [0, 0, 2], [-1, -1, 1]) # range, shape:BxNx1
    # shift_ab = point_cloud_ab - query_points
    shifted_pl = point_cloud - query_points
    #

    with tf.variable_scope('transform_net1') as sc:
        transform = input_transform_net(shifted_pl, is_training, bn_decay, K=3)
    point_cloud_transformed = tf.matmul(shifted_pl, transform)
    input_image = tf.expand_dims(point_cloud_transformed, -1)

    net = tf_util.conv2d(input_image, 64, [1,3],
                         padding='VALID', stride=[1,1],
                         bn=False, is_training=is_training,
                         scope='conv1', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=False, is_training=is_training,
                         scope='conv2', bn_decay=bn_decay)

    with tf.variable_scope('transform_net2') as sc:
        transform = feature_transform_net(net, is_training, bn_decay, K=64)
    end_points['transform'] = transform
    net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), transform)
    point_feat = tf.expand_dims(net_transformed, [2])
    print(point_feat)

    net = tf_util.conv2d(point_feat, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=False, is_training=is_training,
                         scope='conv3', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=False, is_training=is_training,
                         scope='conv4', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 1024, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=False, is_training=is_training,
                         scope='conv5', bn_decay=bn_decay)

    # Symmetric function: max pooling
    global_feat = tf_util.max_pool2d(net, [num_point,1],
                             padding='VALID', scope='maxpool')
    print(global_feat)

    global_feat_expand = tf.tile(global_feat, [1, num_point, 1, 1])
    concat_feat = tf.concat([point_feat, global_feat_expand], 3)
    print(concat_feat)

    net = tf_util.conv2d(concat_feat, 512, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv6', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 256, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv7', bn_decay=bn_decay)
    # net = tf_util.conv2d(net, 128, [1,1],
    #                      padding='VALID', stride=[1,1],
    #                      bn=True, is_training=is_training,
    #                      scope='conv8', bn_decay=bn_decay)
    # net = tf_util.conv2d(net, 128, [1,1],
    #                      padding='VALID', stride=[1,1],
    #                      bn=True, is_training=is_training,
    #                      scope='conv9', bn_decay=bn_decay)

    net = tf_util.conv2d(net, 1, [1,1],
                         padding='VALID', stride=[1,1], activation_fn=None,
                         scope='conv10')
    net = tf.squeeze(net, [2]) # BxNxC
    net = tf.reduce_mean(tf.squeeze(tf.multiply(net, point_cloud_range)), axis=[1]) # BxNx1 dot BxNx1 output Bx1
    # output Bx1

    return net, end_points
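
The last two lines turn the per-point head into a single range estimate: conv10 emits one scalar per point, which is multiplied with that point's range channel and averaged over the N points. A NumPy sketch of the readout:

import numpy as np

B, N = 4, 1024
per_point_score = np.random.rand(B, N, 1)     # tf.squeeze(conv10 output, [2])
point_cloud_range = np.random.rand(B, N, 1)   # third channel sliced from the input

pred_range = np.mean(np.squeeze(per_point_score * point_cloud_range), axis=1)
assert pred_range.shape == (B,)               # one predicted range per batch element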
Example 13
def forward(point_cloud, is_training, bn_decay=None):
    """LPD-Net:FNSF,    INPUT is batch_num_queries X num_pointclouds_per_query X num_points_per_pointcloud X 13,
                        OUTPUT batch_num_queries X num_pointclouds_per_query X output_dim """
    batch_num_queries = point_cloud.get_shape()[0].value
    num_pointclouds_per_query = point_cloud.get_shape()[1].value
    num_points = point_cloud.get_shape()[2].value
    CLUSTER_SIZE=64
    OUTPUT_DIM=256
    k=20
    point_cloud = tf.reshape(point_cloud, [batch_num_queries*num_pointclouds_per_query, num_points,13])

    point_cloud, feature_cloud = tf.split(point_cloud, [3,10], 2)

    with tf.variable_scope('transform_net1') as sc:
        input_transform = input_transform_net(point_cloud, is_training, bn_decay, K=3)
    point_cloud_transformed = tf.matmul(point_cloud, input_transform)

    # Neural Network to learn neighborhood features
    # feature_cloud = neural_feature_net(point_cloud, is_training, bn_decay, knn_k=20, F=10)

    point_cloud_input = tf.concat([point_cloud_transformed, feature_cloud], 2)

    point_cloud_input = tf.expand_dims(point_cloud_input, -1)

    net = tf_util.conv2d(point_cloud_input, 64, [1, 13],
                         padding='VALID', stride=[1,1],
                         is_training=is_training,
                         scope='conv1', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 64, [1, 1],
                         padding='VALID', stride=[1,1],
                         is_training=is_training,
                         scope='conv2', bn_decay=bn_decay)

    with tf.variable_scope('transform_net2') as sc:
        feature_transform = feature_transform_net(net, is_training, bn_decay, K=64)
    feature_transform = tf.matmul(tf.squeeze(net, axis=[2]), feature_transform)

    # Serial structure
    # Dynamic Graph CNN for feature space
    with tf.variable_scope('DGfeature') as sc:
        adj_matrix = tf_util.pairwise_distance(feature_transform)
        nn_idx = tf_util.knn(adj_matrix, k=k)
        edge_feature = tf_util.get_edge_feature(net, nn_idx=nn_idx, k=k)

        net = tf_util.conv2d(edge_feature, 64, [1, 1],
                             padding='VALID', stride=[1, 1],
                             bn=True, is_training=is_training,
                             scope='dgmlp1', bn_decay=bn_decay)
        net = tf_util.conv2d(net, 64, [1, 1],
                             padding='VALID', stride=[1, 1],
                             bn=True, is_training=is_training,
                             scope='dgmlp2', bn_decay=bn_decay)
        net = tf.reduce_max(net, axis=-2, keep_dims=True)

    # Spatial Neighborhood fusion for cartesian space
    with tf.variable_scope('SNfeature') as sc:
        adj_matrix = tf_util.pairwise_distance(point_cloud)
        nn_idx = tf_util.knn(adj_matrix, k=k)

        idx_ = tf.range(batch_num_queries*num_pointclouds_per_query) * num_points
        idx_ = tf.reshape(idx_, [batch_num_queries*num_pointclouds_per_query, 1, 1])

        feature_cloud = tf.reshape(net, [-1, 64])
        edge_feature = tf.gather(feature_cloud, nn_idx+idx_)

        net = tf_util.conv2d(edge_feature, 64, [1, 1],
                             padding='VALID', stride=[1, 1],
                             bn=True, is_training=is_training,
                             scope='snmlp1', bn_decay=bn_decay)
        net = tf_util.conv2d(net, 64, [1, 1],
                             padding='VALID', stride=[1, 1],
                             bn=True, is_training=is_training,
                             scope='snmlp2', bn_decay=bn_decay)
        net = tf.reduce_max(net, axis=-2, keep_dims=True)

    # MLP for fusion
    net = tf_util.conv2d(net, 64, [1, 1],
                         padding='VALID', stride=[1,1],
                         is_training=is_training,
                         scope='conv3', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1, 1],
                         padding='VALID', stride=[1,1],
                         is_training=is_training,
                         scope='conv4', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 1024, [1, 1],
                         padding='VALID', stride=[1,1],
                         is_training=is_training,
                         scope='conv5', bn_decay=bn_decay)
    point_wise_feature = net

    NetVLAD = lp.NetVLAD(feature_size=1024, max_samples=num_points, cluster_size=CLUSTER_SIZE, 
                    output_dim=OUTPUT_DIM, gating=True, add_batch_norm=True,
                    is_training=is_training)

    net = tf.reshape(net, [-1, 1024])
    net = tf.nn.l2_normalize(net, 1)
    output = NetVLAD.forward(net)
    print(output)

    # normalize to have norm 1
    output = tf.nn.l2_normalize(output, 1)
    output = tf.reshape(output, [batch_num_queries, num_pointclouds_per_query, OUTPUT_DIM])

    return output
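
The SNfeature block gathers neighbourhood features without a dedicated helper: per-point features are flattened to (B*N) x C and the kNN indices, which are local to each cloud, are offset by i*N for cloud i so a single tf.gather collects every neighbourhood at once. A sketch of that indexing trick, with placeholder shapes chosen only for illustration:

import tensorflow as tf

B, N, C, K = 2, 4096, 64, 20

flat_features = tf.placeholder(tf.float32, shape=(B * N, C))   # reshaped per-point features
nn_idx = tf.placeholder(tf.int32, shape=(B, N, K))             # neighbour ids, each in [0, N)

offset = tf.reshape(tf.range(B) * N, [B, 1, 1])                # row offset for cloud i
edge_feature = tf.gather(flat_features, nn_idx + offset)       # B x N x K x C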
Example 14
def get_model(point_cloud, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    netStructure = []
    numberOfTransforms = 0
    convNumber = 1
    maxpoolNumber = 0
    fcNumber = 0
    dropoutNumber = 0

    net = point_cloud

    if random.randint(1, 2) == 1:
        layer = ["expand", 0, 0]
        netStructure.append(layer)
        while True:
            i = random.randint(1, 3)
            if i == 1:
                convNumber += 1
                layer = [
                    "conv2d",
                    math.pow(2, random.randint(4, 10)), convNumber
                ]
                netStructure.append(layer)
            elif i == 2:
                maxpoolNumber += 1
                layer = ["maxpool", 0, maxpoolNumber]
                netStructure.append(layer)
            elif i == 3:
                layer = ["transform", 0, 1]
                netStructure.append(layer)
                break
            else:
                pass
    else:
        layer = ["transform", 0, 1]
        netStructure.append(layer)
    if random.randint(1, 2) == 1:
        convNumber += 1
        layer = [
            "conv2d_trans1",
            math.pow(2, random.randint(4, 10)), convNumber
        ]
        netStructure.append(layer)
        while True:
            i = random.randint(1, 3)
            if i == 1:
                convNumber += 1
                layer = [
                    "conv2d",
                    math.pow(2, random.randint(4, 10)), convNumber
                ]
                netStructure.append(layer)
            elif i == 2:
                maxpoolNumber += 1
                layer = ["maxpool", 0, maxpoolNumber]
                netStructure.append(layer)
            elif i == 3:
                convNumber += 1
                layer = ["conv2d", 64, convNumber]
                netStructure.append(layer)
                layer = ["transform", 0, 2]
                netStructure.append(layer)
                convNumber += 1
                layer = [
                    "conv2d",
                    math.pow(2, random.randint(4, 10)), convNumber
                ]
                netStructure.append(layer)
                break
            else:
                pass
    else:
        convNumber += 1
        layer = ["conv2d", 64, convNumber]
        netStructure.append(layer)
        layer = ["transform", 0, 2]
        netStructure.append(layer)
        convNumber += 1
        layer = [
            "conv2d_trans2",
            math.pow(2, random.randint(4, 10)), convNumber
        ]
        netStructure.append(layer)

    while True:
        i = random.randint(1, 3)
        if i == 1:
            convNumber += 1
            layer = ["conv2d", math.pow(2, random.randint(4, 10)), convNumber]
            netStructure.append(layer)
        elif i == 2:
            maxpoolNumber += 1
            layer = ["maxpool", 0, maxpoolNumber]
            netStructure.append(layer)
        elif i == 3:
            fcNumber += 1
            layer = ["fc", math.pow(2, random.randint(4, 10)), fcNumber]
            netStructure.append(layer)
            break
        else:
            pass
    while True:
        i = random.randint(1, 3)
        if i == 1:
            fcNumber += 1
            layer = ["fc", math.pow(2, random.randint(4, 10)), fcNumber]
            netStructure.append(layer)
        elif i == 2:
            dropoutNumber += 1
            layer = ["dropout", 0, dropoutNumber]
            netStructure.append(layer)
            fcNumber += 1
            layer = ["fc", math.pow(2, random.randint(4, 10)), fcNumber]
            netStructure.append(layer)
        elif i == 3:
            fcNumber += 1
            layer = ["fc", 4, fcNumber]
            netStructure.append(layer)
            break
        else:
            pass
    print(netStructure)
    # NOTE: the randomly generated structure above is discarded here and
    # replaced with a fixed architecture before any layers are built.
    netStructure = [["transform", 0, 1], ["conv2d", 64, 1],
                    ["transform", 0, 2], ["conv2d", 32, 2], ["maxpool", 0, 1],
                    ["fc", 64, 1], ["dropout", 0, 1], ["fc", 4, 2]]
    for layer in netStructure:

        print(layer)
        if layer[0] == "conv2d":
            print("conv")
            if layer[2] == 1:
                net = tf_util.conv2d(input_image,
                                     layer[1], [1, 3],
                                     padding='VALID',
                                     stride=[1, 1],
                                     bn=True,
                                     is_training=is_training,
                                     scope='conv%d' % (layer[2]),
                                     bn_decay=bn_decay)
            else:
                net = tf_util.conv2d(net,
                                     layer[1], [1, 1],
                                     padding='VALID',
                                     stride=[1, 1],
                                     bn=True,
                                     is_training=is_training,
                                     scope='conv%d' % (layer[2]),
                                     bn_decay=bn_decay)
            print(layer[:])
        elif layer[0] == "maxpool":
            net = tf_util.max_pool2d(net, [num_point, 1],
                                     padding='VALID',
                                     scope='maxpool%d' % (layer[2]))
            print(layer[:])
        elif layer[0] == "transform":
            if layer[2] == 1:
                with tf.variable_scope('transform_net%d' % (layer[2])) as sc:
                    transform = input_transform_net(point_cloud,
                                                    is_training,
                                                    bn_decay,
                                                    K=3)
                point_cloud_transformed = tf.matmul(point_cloud, transform)
                input_image = tf.expand_dims(point_cloud_transformed, -1)
            else:
                with tf.variable_scope('transform_net%d' % (layer[2])) as sc:
                    transform = feature_transform_net(net,
                                                      is_training,
                                                      bn_decay,
                                                      K=64)
                end_points['transform'] = transform
                net_transformed = tf.matmul(tf.squeeze(net, axis=[2]),
                                            transform)
                net_transformed = tf.expand_dims(net_transformed, [2])

        elif layer[0] == "fc":
            if layer[2] == 1:
                net = tf.reshape(net, [batch_size, -1])
                net = tf_util.fully_connected(net,
                                              layer[1],
                                              bn=True,
                                              is_training=is_training,
                                              scope='fc%d' % (layer[2]),
                                              bn_decay=bn_decay)

            elif layer[1] == 4:
                net = tf_util.fully_connected(net,
                                              4,
                                              activation_fn=None,
                                              scope='fc%d' % (layer[2]))
            else:
                net = tf_util.fully_connected(net,
                                              layer[1],
                                              bn=True,
                                              is_training=is_training,
                                              scope='fc%d' % (layer[2]),
                                              bn_decay=bn_decay)
            print(layer[:])
        elif layer[0] == "dropout":
            net = tf_util.dropout(net,
                                  keep_prob=0.7,
                                  is_training=is_training,
                                  scope='dp%d' % (layer[2]))
            print(layer[:])
        elif layer[0] == "expand":
            print("expand")
            net = tf.expand_dims(net, -1)
        elif layer[0] == "conv2d_trans1":
            net = tf_util.conv2d(input_image,
                                 layer[1], [1, 3],
                                 padding='VALID',
                                 stride=[1, 1],
                                 bn=True,
                                 is_training=is_training,
                                 scope='conv%d' % (layer[2]),
                                 bn_decay=bn_decay)
        elif layer[0] == "conv2d_trans2":
            net = tf_util.conv2d(net_transformed,
                                 layer[1], [1, 1],
                                 padding='VALID',
                                 stride=[1, 1],
                                 bn=True,
                                 is_training=is_training,
                                 scope='conv%d' % (layer[2]),
                                 bn_decay=bn_decay)
        else:
            pass

    return netStructure, net, end_points
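For reference, each entry in netStructure is a three-element descriptor [kind, width, index] consumed by the loop above. A minimal standalone sketch (plain Python, no TensorFlow; the validation rules are assumptions inferred from the branches handled above) that sanity-checks such a list before it is handed to the builder:

def check_structure(net_structure):
    """Validate a [kind, width, index] layer list before building the graph (hypothetical helper)."""
    valid_kinds = {"transform", "conv2d", "conv2d_trans1", "conv2d_trans2",
                   "maxpool", "fc", "dropout", "expand"}
    for kind, width, index in net_structure:
        assert kind in valid_kinds, "unknown layer kind: %s" % kind
        assert index >= 1, "layer indices are 1-based"
        if kind in ("conv2d", "conv2d_trans1", "conv2d_trans2", "fc"):
            assert width == int(width) and width >= 1, "width must be a positive integer"
    return True

check_structure([["transform", 0, 1], ["conv2d", 64, 1], ["transform", 0, 2],
                 ["conv2d", 32, 2], ["maxpool", 0, 1], ["fc", 64, 1],
                 ["dropout", 0, 1], ["fc", 4, 2]])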
Exemplo n.º 15
def get_model(
    point_cloud,
    is_training,
    one_hot_labels,
    bn_decay=None,
):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}

    with tf.variable_scope('transform_net1', reuse=tf.AUTO_REUSE) as sc:
        transform = input_transform_net(point_cloud,
                                        is_training,
                                        bn_decay,
                                        K=3)
    point_cloud_transformed = tf.matmul(point_cloud, transform)
    input_image = tf.expand_dims(point_cloud_transformed, -1)

    net = tf_util.conv2d(input_image,
                         64, [1, 3],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv1',
                         bn_decay=bn_decay)
    net = tfgan.features.condition_tensor_from_onehot(net, one_hot_labels)
    net = tf_util.conv2d(net,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv2',
                         bn_decay=bn_decay)

    with tf.variable_scope('transform_net2', reuse=tf.AUTO_REUSE) as sc:
        transform = feature_transform_net(net, is_training, bn_decay, K=64)
    end_points['transform'] = transform
    net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), transform)
    net_transformed = tf.expand_dims(net_transformed, [2])

    net = tf_util.conv2d(net_transformed,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv3',
                         bn_decay=bn_decay)
    #net = tfgan.features.condition_tensor_from_onehot(net, one_hot_labels)
    net = tf_util.conv2d(net,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv4',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         1024, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv5',
                         bn_decay=bn_decay)

    # Symmetric function: average pooling (note: this variant uses avg_pool2d
    # despite the 'maxpool' scope name)
    net = tf_util.avg_pool2d(net, [num_point, 1],
                             padding='VALID',
                             scope='maxpool')

    net = tf.reshape(net, [batch_size, -1])
    net = tf_util.fully_connected(net,
                                  512,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc1',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.7,
                          is_training=is_training,
                          scope='dp1')
    net = tf_util.fully_connected(net,
                                  256,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc2',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.7,
                          is_training=is_training,
                          scope='dp2')
    net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')

    return net, end_points
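A hedged usage sketch for the conditional model above (TF 1.x graph mode; assumes tf, tf_util, the transform nets and tf.contrib.gan as tfgan are already imported; the sizes are illustrative):

import tensorflow as tf

BATCH_SIZE, NUM_POINT, NUM_CLASSES = 32, 1024, 40   # hypothetical sizes
points_pl = tf.placeholder(tf.float32, shape=(BATCH_SIZE, NUM_POINT, 3))
labels_pl = tf.placeholder(tf.int32, shape=(BATCH_SIZE,))
one_hot_pl = tf.one_hot(labels_pl, depth=NUM_CLASSES)
is_training_pl = tf.placeholder(tf.bool, shape=())

logits, end_points = get_model(points_pl, is_training_pl, one_hot_pl)
# logits has shape (32, 40) per the fc3 layer above; end_points['transform'] is (32, 64, 64)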
Exemplo n.º 16
def cls(
    input_tensor=None,
    num_cls=40,
    C=None,
):

    num_point = C.num_point
    batch_size = C.batch_size
    is_training = C.is_training
    end_points = {}

    input_shape = (num_point, 3)  # follow the config rather than hard-coding 1024 points
    if input_tensor is None:
        input_pointcloud = layers.Input(shape=input_shape)
    else:
        input_pointcloud = input_tensor

    transform = input_transform_net(input_pointcloud,
                                    C,
                                    block='pointnet_cls',
                                    stage=1,
                                    is_training=is_training)
    point_cloud_transformed = tf.matmul(input_pointcloud, transform)

    input_image = tf.expand_dims(point_cloud_transformed, -1)

    x = layers.Conv2D(64, (1, 3),
                      strides=(1, 1),
                      trainable=is_training,  # note: Keras `trainable` freezes weights; it is not the BN/dropout training-mode flag
                      name='conv1')(input_image)
    x = layers.BatchNormalization(name='conv1_bn')(x)
    x = layers.Activation('relu')(x)

    x = layers.Conv2D(64, (1, 1),
                      strides=(1, 1),
                      trainable=is_training,
                      name='conv2')(x)
    x = layers.BatchNormalization(name='conv2_bn')(x)
    x = layers.Activation('relu')(x)

    transform = feature_transform_net(x,
                                      C,
                                      block='pointnet_cls',
                                      stage=1,
                                      is_training=is_training)
    end_points['transform'] = transform

    x_transformed = tf.matmul(tf.squeeze(x, axis=-2), transform)
    x_transformed = tf.expand_dims(x_transformed, [2])

    x = layers.Conv2D(64, (1, 1),
                      strides=(1, 1),
                      trainable=is_training,
                      name='conv3')(x_transformed)
    x = layers.BatchNormalization(name='conv3_bn')(x)
    x = layers.Activation('relu')(x)

    x = layers.Conv2D(128, (1, 1),
                      strides=(1, 1),
                      trainable=is_training,
                      name='conv4')(x)
    x = layers.BatchNormalization(name='conv4_bn')(x)
    x = layers.Activation('relu')(x)

    x = layers.Conv2D(1024, (1, 1),
                      strides=(1, 1),
                      trainable=is_training,
                      name='conv5')(x)
    x = layers.BatchNormalization(name='conv5_bn')(x)
    x = layers.Activation('relu')(x)

    # Symmetric function from PointNet: max pooling
    x = layers.MaxPool2D((num_point, 1), name='maxpool1')(x)

    x = layers.Flatten()(x)

    x = layers.Dense(512,
                     activation='relu',
                     trainable=is_training,
                     name='fcl1')(x)
    x = layers.BatchNormalization(name='fcl1_bn')(x)
    x = layers.Dropout(rate=0.7, name='dp1')(x)  # Keras rate is the fraction dropped (rate=0.7 drops 70% of units)

    x = layers.Dense(256,
                     activation='relu',
                     trainable=is_training,
                     name='fcl2')(x)
    x = layers.BatchNormalization(name='fcl2_bn')(x)
    x = layers.Dropout(rate=0.6, name='dp2')(x)

    output_cls = layers.Dense(num_cls,
                              activation=None,
                              trainable=is_training,
                              name='fcl3')(x)

    return output_cls, end_points
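One porting pitfall worth noting: tf_util.dropout in the TF examples takes keep_prob (the fraction kept), while keras.layers.Dropout takes rate (the fraction dropped), so keep_prob=0.7 corresponds to rate=0.3. A small numpy-only sketch of the relationship:

import numpy as np

rate = 0.3                      # Keras Dropout: fraction of units dropped
keep_prob = 1.0 - rate          # tf_util.dropout: fraction of units kept
mask = np.random.default_rng(0).random(100000) < keep_prob
print(mask.mean())              # ~0.7, i.e. keep_prob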
Exemplo n.º 17
def get_model(point_cloud, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}

    #kx
    with tf.variable_scope('kx_input_pts') as pt:
        kx_input_pts = tf.multiply(point_cloud, 1)

    # with tf.variable_scope('transform_net1') as sc:
    #     transform = input_transform_net(point_cloud, is_training, bn_decay, K=3)
    # point_cloud_transformed = tf.matmul(point_cloud, transform)
    point_cloud_transformed = point_cloud
    #kx
    # print("kx debug")
    # print(point_cloud.eval())
    # print(point_cloud_transformed.eval())
    # print(transform.eval())
    #kx
    input_image = tf.expand_dims(point_cloud_transformed, -1)

    net = tf_util.conv2d(input_image,
                         64, [1, 3],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv1',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv2',
                         bn_decay=bn_decay)

    with tf.variable_scope('transform_net2') as sc:
        transform = feature_transform_net(net, is_training, bn_decay, K=64)
    end_points['transform'] = transform
    net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), transform)
    net_transformed = tf.expand_dims(net_transformed, [2])

    net = tf_util.conv2d(net_transformed,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv3',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv4',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         1024, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv5',
                         bn_decay=bn_decay)

    # Symmetric function: max pooling
    net = tf_util.max_pool2d(net, [num_point, 1],
                             padding='VALID',
                             scope='maxpool')

    net = tf.reshape(net, [batch_size, -1])
    net = tf_util.fully_connected(net,
                                  512,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc1',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.7,
                          is_training=is_training,
                          scope='dp1')
    net = tf_util.fully_connected(net,
                                  256,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc2',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.7,
                          is_training=is_training,
                          scope='dp2')
    net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')

    return net, end_points
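If the commented-out debug prints above are ever re-enabled, the tensors have to be evaluated inside a session with fed placeholders; a hedged TF 1.x sketch (placeholder shapes are illustrative):

import numpy as np
import tensorflow as tf

points_pl = tf.placeholder(tf.float32, shape=(32, 1024, 3))
is_training_pl = tf.placeholder(tf.bool, shape=())
logits, end_points = get_model(points_pl, is_training_pl)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    feed = {points_pl: np.random.rand(32, 1024, 3), is_training_pl: False}
    print(sess.run(logits, feed_dict=feed).shape)   # (32, 40)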
Exemplo n.º 18
def get_model(point_cloud, cls_label, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}

    l0_xyz = tf.slice(point_cloud, [0, 0, 0], [-1, -1, 3])
    l0_points = tf.slice(point_cloud, [0, 0, 3], [-1, -1, 3])

    # Apply input-transform network
    with tf.variable_scope('transform_net1') as sc:
        transform = input_transform_net(point_cloud,
                                        is_training,
                                        bn_decay,
                                        K=3,
                                        normals=True)

    l0_xyz_transformed = tf.matmul(l0_xyz, transform)
    l0_points_transformed = tf.matmul(l0_points, transform)

    # Set abstraction layers
    l1_xyz, l1_points = pointnet_sa_module_msg(
        l0_xyz_transformed,
        l0_points_transformed,
        512, [0.1, 0.2, 0.4], [32, 64, 128],
        [[32, 32, 64], [64, 64, 128], [64, 96, 128]],
        is_training,
        bn_decay,
        scope='layer1')

    # Apply feature-transform network after first SA layer
    with tf.variable_scope('transform_net2') as sc:
        K = l1_points.get_shape(
        )[2].value  # features were concatenated from multi-scales
        l1_points_expanded = tf.expand_dims(l1_points, 2)
        transform = feature_transform_net(l1_points_expanded,
                                          is_training,
                                          bn_decay,
                                          K=K)

    l1_points_transformed = tf.matmul(l1_points, transform)
    end_points['transform'] = transform

    l2_xyz, l2_points = pointnet_sa_module_msg(
        l1_xyz,
        l1_points_transformed,
        128, [0.4, 0.8], [64, 128], [[128, 128, 256], [128, 196, 256]],
        is_training,
        bn_decay,
        scope='layer2')
    l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz,
                                                       l2_points,
                                                       npoint=None,
                                                       radius=None,
                                                       nsample=None,
                                                       mlp=[256, 512, 1024],
                                                       mlp2=None,
                                                       group_all=True,
                                                       is_training=is_training,
                                                       bn_decay=bn_decay,
                                                       scope='layer3')

    # Feature propagation layers
    l2_points = pointnet_fp_module(l2_xyz,
                                   l3_xyz,
                                   l2_points,
                                   l3_points, [256, 256],
                                   is_training,
                                   bn_decay,
                                   scope='fa_layer1')
    l1_points = pointnet_fp_module(l1_xyz,
                                   l2_xyz,
                                   l1_points,
                                   l2_points, [256, 128],
                                   is_training,
                                   bn_decay,
                                   scope='fa_layer2')

    cls_label_one_hot = tf.one_hot(cls_label,
                                   depth=NUM_CATEGORIES,
                                   on_value=1.0,
                                   off_value=0.0)
    cls_label_one_hot = tf.reshape(cls_label_one_hot,
                                   [batch_size, 1, NUM_CATEGORIES])
    cls_label_one_hot = tf.tile(cls_label_one_hot, [1, num_point, 1])
    l0_points = pointnet_fp_module(
        l0_xyz_transformed,
        l1_xyz,
        tf.concat(
            [cls_label_one_hot, l0_xyz_transformed, l0_points_transformed],
            axis=-1),
        l1_points, [128, 128],
        is_training,
        bn_decay,
        scope='fp_layer3')

    # FC layers
    net = tf_util.conv1d(l0_points,
                         128,
                         1,
                         padding='VALID',
                         bn=True,
                         is_training=is_training,
                         scope='fc1',
                         bn_decay=bn_decay)
    end_points['feats'] = net
    net = tf_util.dropout(net,
                          keep_prob=0.5,
                          is_training=is_training,
                          scope='dp1')
    net = tf_util.conv1d(net,
                         50,
                         1,
                         padding='VALID',
                         activation_fn=None,
                         scope='fc2')

    return net, end_points
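The two tf.slice calls at the top assume a BxNx6 input in which columns 0-2 are xyz and columns 3-5 are per-point normals; a minimal sketch of assembling such a batch (shapes are illustrative):

import numpy as np

xyz = np.random.rand(8, 2048, 3).astype(np.float32)
normals = np.random.rand(8, 2048, 3).astype(np.float32)
point_cloud = np.concatenate([xyz, normals], axis=-1)   # (8, 2048, 6): what get_model slices apart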
Exemplo n.º 19
def get_model(point_cloud, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}

    # Auxiliary random features; batch size 32, 1024 points and 64 channels are hard-coded here
    var1 = tf.random_normal([32, 1024, 1, 64],
                            mean=0.0,
                            stddev=1.0,
                            dtype=tf.float32)
    var1 = tf.contrib.layers.fully_connected(var1,
                                             64,
                                             activation_fn=tf.nn.relu)
    var1 = tf.contrib.layers.fully_connected(var1,
                                             64,
                                             activation_fn=tf.nn.relu)

    with tf.variable_scope('transform_net1') as sc:
        transform = input_transform_net(point_cloud,
                                        is_training,
                                        bn_decay,
                                        K=3)
        #transform.shape = (32,3,3)
    point_cloud_transformed = tf.matmul(point_cloud, transform)
    #input_image = tf.expand_dims(point_cloud_transformed, -1)

    rotate_matric_ = rotate(k=3)
    input_image_rotate = tf.matmul(point_cloud_transformed, rotate_matric_)
    input_image_rotate = tf.expand_dims(input_image_rotate, -1)

    net = tf_util.conv2d(input_image_rotate,
                         64, [1, 3],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv1',
                         bn_decay=bn_decay)

    net = tf.concat([net, var1], axis=3)  # TF 1.x signature: values first, then axis
    net = tf_util.conv2d(net,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv2',
                         bn_decay=bn_decay)
    #print(net.shape)
    with tf.variable_scope('transform_net2') as sc:
        transform = feature_transform_net(net, is_training, bn_decay, K=64)
    end_points['transform'] = transform
    net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), transform)
    net_transformed = tf.expand_dims(net_transformed, [2])

    #net = tf.add(net, var2)
    net = tf_util.conv2d(net_transformed,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv3',
                         bn_decay=bn_decay)

    #net = tf.add(net, var3)
    net = tf_util.conv2d(net,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv4',
                         bn_decay=bn_decay)

    #net = tf.add(net, var4)
    net = tf_util.conv2d(net,
                         1024, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv5',
                         bn_decay=bn_decay)
    #print(net.shape)

    # Symmetric function: max pooling
    net = tf_util.max_pool2d(net, [num_point, 1],
                             padding='VALID',
                             scope='maxpool')

    net = tf.reshape(net, [batch_size, -1])
    net = tf_util.fully_connected(net,
                                  512,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc1',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.7,
                          is_training=is_training,
                          scope='dp1')
    net = tf_util.fully_connected(net,
                                  256,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc2',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.7,
                          is_training=is_training,
                          scope='dp2')
    net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')

    return net, end_points
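A quick numpy shape check of the noise-concatenation step above: net and var1 are both BxNx1x64, so joining along axis 3 yields BxNx1x128 before conv2 (sizes follow the hard-coded 32 x 1024 batch):

import numpy as np

net = np.zeros((32, 1024, 1, 64), dtype=np.float32)
var1 = np.zeros((32, 1024, 1, 64), dtype=np.float32)
print(np.concatenate([net, var1], axis=3).shape)   # (32, 1024, 1, 128)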
Exemplo n.º 20
def pointnet(num_classes,
             batch_size,
             num_points,
             bn_mom=0.9,
             workspace=512,
             scope="pointnet_"):
    point_cloud = mx.sym.Variable(name='data')  # (B,P,3)

    # Point cloud transformer
    transform = input_transform_net(point_cloud,
                                    batch_size,
                                    num_points,
                                    workspace,
                                    bn_mom,
                                    scope=scope + "itn_")  # (B, 3, 3)
    point_cloud_transformed = mx.sym.batch_dot(point_cloud,
                                               transform,
                                               name=scope + "input_transform")
    input_image = mx.sym.expand_dims(point_cloud_transformed,
                                     axis=1)  # (B, 1, P, 3)

    # Shared mlp
    conv0 = mx.sym.Convolution(data=input_image,
                               num_filter=64,
                               kernel=(1, 3),
                               stride=(1, 1),
                               name=scope + "conv0",
                               workspace=workspace)
    conv0 = mx.sym.BatchNorm(data=conv0,
                             fix_gamma=False,
                             eps=eps,
                             momentum=bn_mom,
                             name=scope + 'bn0')
    conv0 = mx.sym.Activation(data=conv0,
                              act_type='relu',
                              name=scope + 'relu0')

    conv1 = mx.sym.Convolution(data=conv0,
                               num_filter=64,
                               kernel=(1, 1),
                               stride=(1, 1),
                               name=scope + "conv1",
                               workspace=workspace)
    conv1 = mx.sym.BatchNorm(data=conv1,
                             fix_gamma=False,
                             eps=eps,
                             momentum=bn_mom,
                             name=scope + 'bn1')  # (B, 64, 1024, 1)
    conv1 = mx.sym.Activation(data=conv1,
                              act_type='relu',
                              name=scope + 'relu1')

    # Feature transformer
    transform = feature_transform_net(conv1,
                                      batch_size,
                                      num_points,
                                      workspace,
                                      bn_mom,
                                      scope=scope + "ftn_")  # (B, 64, 64)
    conv1_reshaped = mx.sym.Reshape(conv1, (-1, 64, num_points),
                                    name=scope +
                                    "conv1_reshape")  # (B, 64, 1024)
    conv1_reshaped = mx.sym.transpose(conv1_reshaped,
                                      axes=(0, 2, 1),
                                      name=scope + "conv1_reshape_transpose")
    conv1_transformed = mx.sym.batch_dot(conv1_reshaped,
                                         transform,
                                         name=scope + "conv1_transform")
    conv1_transformed = mx.sym.swapaxes(conv1_transformed,
                                        1,
                                        2,
                                        name=scope + "conv1_swapaxes")
    conv1_transformed = mx.sym.expand_dims(conv1_transformed,
                                           axis=3,
                                           name=scope + "conv1_expanddim")

    conv2 = mx.sym.Convolution(data=conv1_transformed,
                               num_filter=64,
                               kernel=(1, 1),
                               stride=(1, 1),
                               name=scope + "conv2",
                               workspace=workspace)
    conv2 = mx.sym.BatchNorm(data=conv2,
                             fix_gamma=False,
                             eps=eps,
                             momentum=bn_mom,
                             name=scope + 'bn2')
    conv2 = mx.sym.Activation(data=conv2,
                              act_type='relu',
                              name=scope + 'relu2')

    conv3 = mx.sym.Convolution(data=conv2,
                               num_filter=128,
                               kernel=(1, 1),
                               stride=(1, 1),
                               name=scope + "conv3",
                               workspace=workspace)
    conv3 = mx.sym.BatchNorm(data=conv3,
                             fix_gamma=False,
                             eps=eps,
                             momentum=bn_mom,
                             name=scope + 'bn3')
    conv3 = mx.sym.Activation(data=conv3,
                              act_type='relu',
                              name=scope + 'relu3')

    conv4 = mx.sym.Convolution(data=conv3,
                               num_filter=1024,
                               kernel=(1, 1),
                               stride=(1, 1),
                               name=scope + "conv4",
                               workspace=workspace)
    conv4 = mx.sym.BatchNorm(data=conv4,
                             fix_gamma=False,
                             eps=eps,
                             momentum=bn_mom,
                             name=scope + 'bn4')
    conv4 = mx.sym.Activation(data=conv4,
                              act_type='relu',
                              name=scope + 'relu4')

    pool5 = mx.sym.Pooling(data=conv4,
                           kernel=(num_points, 1),
                           pool_type='max',
                           name=scope + 'pool5')
    pool5_reshaped = mx.sym.Reshape(data=pool5,
                                    shape=(batch_size, -1),
                                    name=scope + 'pool5_reshape')

    fc6 = mx.sym.FullyConnected(data=pool5_reshaped,
                                num_hidden=512,
                                name=scope + 'fc6')
    fc6 = mx.sym.BatchNorm(data=fc6,
                           fix_gamma=False,
                           eps=eps,
                           momentum=bn_mom,
                           name=scope + 'bn6')
    fc6 = mx.sym.Activation(data=fc6, act_type='relu', name=scope + 'relu6')
    fc6 = mx.sym.Dropout(fc6, p=0.7)

    fc7 = mx.sym.FullyConnected(data=fc6, num_hidden=256, name=scope + 'fc7')
    fc7 = mx.sym.BatchNorm(data=fc7,
                           fix_gamma=False,
                           eps=eps,
                           momentum=bn_mom,
                           name=scope + 'bn7')
    fc7 = mx.sym.Activation(data=fc7, act_type='relu', name=scope + 'relu7')
    fc7 = mx.sym.Dropout(fc7, p=0.7)

    fc8 = mx.sym.FullyConnected(data=fc7, num_hidden=40, name=scope + 'fc8')
    cls = mx.sym.SoftmaxOutput(data=fc8, name='softmax')

    transform_transposed = mx.sym.transpose(transform,
                                            axes=(0, 2, 1),
                                            name=scope + "transpose_transform")
    mat_diff = mx.sym.batch_dot(transform,
                                transform_transposed,
                                name=scope + "transform_dot")
    const_arr = np.eye(64, dtype=np.float32).tolist()
    a = mx.sym.Variable('addition_loss_constant',
                        shape=(batch_size, 64, 64),
                        init=MyConstant(value=[const_arr] * batch_size))
    a = mx.sym.BlockGrad(a)  # now variable a is a constant
    mat_diff = mx.sym.elemwise_sub(mat_diff, a, name=scope + "sub_eye")
    mat_diff_loss = mx.sym.sum(mx.sym.square(mat_diff))
    matloss = mx.sym.make_loss(name='transform_mat_loss',
                               data=mat_diff_loss,
                               grad_scale=0.001 / (batch_size * 2.0))

    return mx.sym.Group([cls, matloss])
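In numpy terms, the transform-regularization branch above computes sum((T @ T^T - I)^2) over the batch of 64x64 feature transforms, pushing each transform towards an orthogonal matrix; a small sketch with a stand-in batch:

import numpy as np

T = np.random.randn(4, 64, 64).astype(np.float32)        # stand-in batch of feature transforms
mat_diff = np.matmul(T, np.transpose(T, (0, 2, 1))) - np.eye(64, dtype=np.float32)
mat_diff_loss = np.square(mat_diff).sum()                 # what mx.sym.sum(mx.sym.square(...)) evaluates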
Exemplo n.º 21
def get_model_w_ae(ae, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = ae.x_reconstr.shape[0]
    num_point = ae.x_reconstr.shape[1]
    end_points = {}

    with tf.variable_scope('transform_net1') as sc:
        transform = input_transform_net(ae.x_reconstr,
                                        is_training,
                                        bn_decay,
                                        K=3)
    point_cloud_transformed = tf.matmul(ae.x_reconstr, transform)
    input_image = tf.expand_dims(point_cloud_transformed, -1)

    net = tf_util.conv2d(input_image,
                         64, [1, 3],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv1',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv2',
                         bn_decay=bn_decay)

    with tf.variable_scope('transform_net2') as sc:
        transform = feature_transform_net(net, is_training, bn_decay, K=64)
    end_points['transform'] = transform
    net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), transform)
    net_transformed = tf.expand_dims(net_transformed, [2])

    net = tf_util.conv2d(net_transformed,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv3',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv4',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         1024, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv5',
                         bn_decay=bn_decay)

    #print("before maxpool")
    #print(net.get_shape())
    end_points['pre_max'] = net
    # Symmetric function: max pooling
    net = tf_util.max_pool2d(net, [num_point, 1],
                             padding='VALID',
                             scope='maxpool')
    end_points['post_max'] = net
    #print("after maxpool")
    #print(net.get_shape())
    net = tf.reshape(net, [batch_size, -1])
    #print("after reshape")
    #print(net.get_shape())
    net = tf_util.fully_connected(net,
                                  512,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc1',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.7,
                          is_training=is_training,
                          scope='dp1')
    net = tf_util.fully_connected(net,
                                  256,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc2',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.7,
                          is_training=is_training,
                          scope='dp2')
    net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')

    #print(end_points['pre_max'].get_shape())
    return net, end_points
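The pre_max / post_max end points simply bracket the symmetric max-pooling step: post_max is the per-channel maximum over the point dimension of pre_max. A numpy sketch of that relationship (shapes are illustrative):

import numpy as np

pre_max = np.random.rand(2, 1024, 1, 1024)            # B x N x 1 x C, as stored in end_points['pre_max']
post_max = pre_max.max(axis=1, keepdims=True)         # B x 1 x 1 x C, matching end_points['post_max']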
Exemplo n.º 22
def Encoder(point_cloud, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    with tf.variable_scope('transform_net1') as sc:
        transform = input_transform_net(point_cloud,
                                        is_training,
                                        bn_decay,
                                        K=3)
    point_cloud_transformed = tf.matmul(point_cloud, transform)
    input_image = tf.expand_dims(point_cloud_transformed, -1)

    net = tf_util.conv2d(input_image,
                         64, [1, 3],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv1',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv2',
                         bn_decay=bn_decay)

    with tf.variable_scope('transform_net2') as sc:
        transform = feature_transform_net(net, is_training, bn_decay, K=64)
    end_points['transform'] = transform
    net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), transform)
    net_transformed = tf.expand_dims(net_transformed, [2])
    net = tf_util.conv2d(net_transformed,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv3',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv4',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         1024, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv5',
                         bn_decay=bn_decay)
    # Symmetric function: max pooling
    vector = tf_util.max_pool2d(net, [num_point, 1],
                                padding='VALID',
                                scope='maxpool')
    vector = tf.reshape(vector, [batch_size, -1])
    print(np.shape(vector))
    return vector
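A hedged usage sketch for the encoder (TF 1.x; shapes are illustrative and assume the same tf_util / transform-net imports as the other examples):

import tensorflow as tf

points_pl = tf.placeholder(tf.float32, shape=(16, 2048, 3))
is_training_pl = tf.placeholder(tf.bool, shape=())
code = Encoder(points_pl, is_training_pl)   # (16, 1024) global feature vector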
Exemplo n.º 23
def get_model(point_cloud, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}

    #T-NET(1),input_transform_net
    with tf.variable_scope('transform_net1') as sc:
        transform = input_transform_net(point_cloud, is_training, bn_decay, K=3)
        # (32,3,3)
    point_cloud_transformed = tf.matmul(point_cloud, transform)
    # (32,1024,3) * (32,3,3) -> (32,1024,3); second block of the paper's architecture figure

    input_image = tf.expand_dims(point_cloud_transformed, -1)
    #(32,1024,3,1)

    net = tf_util.conv2d(input_image, 64, [1,3],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv1', bn_decay=bn_decay)
    #(32,1024,1,64)
    net = tf_util.conv2d(net, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv2', bn_decay=bn_decay)
    # (32,1024,1,64); third block of the paper's architecture figure

    #T-NET(2),feature_transform_net
    with tf.variable_scope('transform_net2') as sc:
        transform = feature_transform_net(net, is_training, bn_decay, K=64)
        #(32,64,64)
    end_points['transform'] = transform

    net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), transform)
    # tf.squeeze() removes dimensions of size 1; here axis [2] has size 1
    # (32,1024,64) * (64,64) -> (32,1024,64); fourth block of the paper's architecture figure

    net_transformed = tf.expand_dims(net_transformed, [2])
    #(32,1024,1,64)

    net = tf_util.conv2d(net_transformed, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv3', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv4', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 1024, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv5', bn_decay=bn_decay)
    # (32,1024,1,1024); fifth block of the paper's architecture figure

    # Symmetric function: max pooling
    net = tf_util.max_pool2d(net, [num_point,1],
                             padding='VALID', scope='maxpool')
    #stride=[2, 2]?
    # (32,1024,1,1024) -> (32,1,1,1024); sixth block of the paper's architecture figure
    net = tf.reshape(net, [batch_size, -1])
    # (32,1024)
    net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training,
                                  scope='fc1', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training,
                          scope='dp1')
    net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training,
                                  scope='fc2', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training,
                          scope='dp2')
    net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')
    # (32,40); seventh block of the paper's architecture figure
    return net, end_points
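end_points['transform'] is typically fed into a regularization term analogous to the mat_diff loss in the MXNet example above; a hedged TF 1.x sketch (the function name and weight are hypothetical):

import numpy as np
import tensorflow as tf

def transform_regularizer(end_points, reg_weight=0.001):
    transform = end_points['transform']                      # B x K x K feature transform
    K = transform.get_shape()[1].value
    mat_diff = tf.matmul(transform, tf.transpose(transform, perm=[0, 2, 1]))
    mat_diff -= tf.constant(np.eye(K), dtype=tf.float32)
    return reg_weight * tf.nn.l2_loss(mat_diff)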
Exemplo n.º 24
def get_model(point_cloud, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}

    with tf.variable_scope('transform_net1') as sc:  # a named scope enables variable sharing and avoids name collisions
        transform = input_transform_net(point_cloud,
                                        is_training,
                                        bn_decay,
                                        K=3)
    point_cloud_transformed = tf.matmul(point_cloud, transform)  # transformed shape: BxNx3
    input_image = tf.expand_dims(point_cloud_transformed, -1)  # shape: BxNx3x1

    # use conv2d to implement the shared MLP (64, 64)
    net = tf_util.conv2d(
        input_image,
        64,
        [1, 3],
        padding='VALID',
        stride=[1, 1],
        # kernel size [1, 3]: the 3 matches the 3 in BxNx3x1, so each point's xyz is consumed in one step; output shape: BxNx1x64
        bn=True,
        is_training=is_training,
        scope='conv1',
        bn_decay=bn_decay)
    net = tf_util.conv2d(
        net,
        64,
        [1, 1],
        padding='VALID',
        stride=[1, 1],  # output shape: BxNx1x64
        bn=True,
        is_training=is_training,
        scope='conv2',
        bn_decay=bn_decay)

    with tf.variable_scope('transform_net2') as sc:  # keep this scope distinct from transform_net1
        transform = feature_transform_net(net, is_training, bn_decay, K=64)
    end_points['transform'] = transform
    net_transformed = tf.matmul(tf.squeeze(net, axis=[2]),
                                transform)  # net has shape BxNx1x64; the size-1 axis must be squeezed out first
    net_transformed = tf.expand_dims(net_transformed, [2])

    net = tf_util.conv2d(net_transformed,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv3',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv4',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         1024, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv5',
                         bn_decay=bn_decay)

    # Symmetric function: max pooling
    net = tf_util.max_pool2d(net, [num_point, 1],
                             padding='VALID',
                             scope='maxpool')

    net = tf.reshape(net, [batch_size, -1])
    net = tf_util.fully_connected(net,
                                  512,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc1',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.7,
                          is_training=is_training,
                          scope='dp1')
    net = tf_util.fully_connected(net,
                                  256,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc2',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.7,
                          is_training=is_training,
                          scope='dp2')
    net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')

    return net, end_points
Exemplo n.º 25
def Discriminator(point_cloud, is_training, bn_decay, reuse=False):
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    with tf.variable_scope("dis", reuse=True):
        with tf.variable_scope('dtransform_net1', reuse=True) as sc:
            transform = input_transform_net(point_cloud,
                                            is_training,
                                            bn_decay,
                                            K=3)
        point_cloud_transformed = tf.matmul(point_cloud, transform)
        input_image = tf.expand_dims(point_cloud_transformed, -1)

        net = tf_util.conv2d(input_image,
                             64, [1, 3],
                             padding='VALID',
                             stride=[1, 1],
                             bn=True,
                             is_training=is_training,
                             scope='dconv1',
                             bn_decay=bn_decay)
        net = tf_util.conv2d(net,
                             64, [1, 1],
                             padding='VALID',
                             stride=[1, 1],
                             bn=True,
                             is_training=is_training,
                             scope='dconv2',
                             bn_decay=bn_decay)

        with tf.variable_scope('dtransform_net2') as sc:
            transform = feature_transform_net(net, is_training, bn_decay, K=64)
        net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), transform)
        net_transformed = tf.expand_dims(net_transformed, [2])
        net = tf_util.conv2d(net_transformed,
                             64, [1, 1],
                             padding='VALID',
                             stride=[1, 1],
                             bn=True,
                             is_training=is_training,
                             scope='dconv3',
                             bn_decay=bn_decay)
        net = tf_util.conv2d(net,
                             128, [1, 1],
                             padding='VALID',
                             stride=[1, 1],
                             bn=True,
                             is_training=is_training,
                             scope='dconv4',
                             bn_decay=bn_decay)
        net = tf_util.conv2d(net,
                             1024, [1, 1],
                             padding='VALID',
                             stride=[1, 1],
                             bn=True,
                             is_training=is_training,
                             scope='dconv5',
                             bn_decay=bn_decay)
        # Symmetric function: max pooling
        vector = tf_util.max_pool2d(net, [num_point, 1],
                                    padding='VALID',
                                    scope='dmaxpool')
        vector = tf.reshape(vector, [batch_size, -1])
        net = vector
        net = tf_util.fully_connected(net,
                                      512,
                                      bn=True,
                                      is_training=is_training,
                                      scope='fc1',
                                      bn_decay=bn_decay)
        net = tf_util.dropout(net,
                              keep_prob=0.7,
                              is_training=is_training,
                              scope='dp1')
        net = tf_util.fully_connected(net,
                                      256,
                                      bn=True,
                                      is_training=is_training,
                                      scope='fc2',
                                      bn_decay=bn_decay)
        net = tf_util.dropout(net,
                              keep_prob=0.7,
                              is_training=is_training,
                              scope='dp2')
        net = tf_util.fully_connected(net,
                                      128,
                                      bn=True,
                                      is_training=is_training,
                                      scope='fc3',
                                      bn_decay=bn_decay)
        net = tf_util.dropout(net,
                              keep_prob=0.7,
                              is_training=is_training,
                              scope='dp3')
        net = tf_util.fully_connected(net, 1, activation_fn=None, scope='fc4')
        D_value = tf.reduce_mean(net)
    return D_value
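A hedged sketch of the weight-sharing pattern the reuse argument supports: build the discriminator once for real clouds (creating variables) and once more with reuse=True for generated clouds; the second placeholder here is only a stand-in for a generator output:

import tensorflow as tf

real_pl = tf.placeholder(tf.float32, shape=(32, 1024, 3))
fake_pl = tf.placeholder(tf.float32, shape=(32, 1024, 3))   # stand-in for generator output
is_training_pl = tf.placeholder(tf.bool, shape=())

d_real = Discriminator(real_pl, is_training_pl, bn_decay=None, reuse=False)
d_fake = Discriminator(fake_pl, is_training_pl, bn_decay=None, reuse=True)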
Exemplo n.º 26
def get_model(point_cloud, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    net = point_cloud
    #    net=tf.expand_dims(point_cloud,-1)
    #    net = tf.expand_dims(net, -1)

    #     net = tf.squeeze(net)
    with tf.variable_scope('transform_net1') as sc:
        transform = input_transform_net(net, is_training, bn_decay, K=3)
    point_cloud_transformed = tf.matmul(point_cloud, transform)
    input_image = tf.expand_dims(point_cloud_transformed, -1)

    net = tf_util.conv2d(input_image,
                         64, [1, 3],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv1',
                         bn_decay=bn_decay)

    #    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp1')

    with tf.variable_scope('transform_net2') as sc:
        transform = feature_transform_net(net, is_training, bn_decay, K=64)
    end_points['transform'] = transform
    net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), transform)
    net_transformed = tf.expand_dims(net_transformed, [2])

    #    net = tf_util.max_pool2d(net_transformed, [num_point,1], padding = 'VALID', scope = 'maxp')

    net = tf_util.conv2d(net_transformed,
                         32, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv3',
                         bn_decay=bn_decay)

    net = tf_util.max_pool2d(net, [num_point, 1],
                             padding='VALID',
                             scope='maxpool')
    # Symmetric function: max pooling
    #    net = tf_util.max_pool2d(net, [num_point, 1],
    #                             padding = 'VALID', scope = 'maxpool')
    net = tf.reshape(net, [batch_size, -1])

    net = tf_util.fully_connected(net,
                                  64,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc1',
                                  bn_decay=bn_decay)

    net = tf_util.dropout(net,
                          keep_prob=0.7,
                          is_training=is_training,
                          scope='dp7')

    net = tf_util.fully_connected(net, 4, activation_fn=None, scope='fc3')

    return net, end_points
Exemplo n.º 27
def get_model(point_cloud, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}

#    f = open("conf.txt","r")
#    configuration = f.read()
#    lines=configuration.split("\n")
#    net = point_cloud
    net=tf.expand_dims(point_cloud,-1)



    net = tf_util.conv2d(net, 512, [1, 1],
                         padding = 'VALID', stride = [1,1],
                         bn=True, is_training = is_training,
                         scope='preStuff1', bn_decay = bn_decay)
#    net = tf_util.dropout(net, keep_prob = 0.3, is_training = is_training, scope='pSdp1')

    net = tf_util.conv2d(net, 128, [1, 1],
                         padding = 'VALID', stride = [1,1],
                         bn = True, is_training = is_training,
                         scope='preStuff2', bn_decay=bn_decay)
#    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='pSdp2')

#    net = tf_util.conv2d(net, 64, [1, 1],
#                         padding = 'VALID', stride = [1, 1],
#                         bn=True, is_training=is_training,
#                         scope = 'preStuff3', bn_decay = bn_decay)

    net = tf_util.max_pool2d(net, [num_point, 1],
                         padding='VALID', scope='preStuff3')


    net = tf_util.conv2d(net, 128, [1, 1],
                         padding='VALID', stride=[1, 1],
                         bn=True, is_training=is_training,
                         scope='preStuff4', bn_decay=bn_decay)

#    net = tf.reshape(net, [batch_size, -1])

    net = tf.squeeze(net)
    with tf.variable_scope('transform_net1') as sc:
        # note: unlike the other examples, the transform net here receives the pooled features rather than the raw point cloud
        transform = input_transform_net(net, is_training, bn_decay, K=3)
    point_cloud_transformed = tf.matmul(point_cloud, transform)
    input_image = tf.expand_dims(point_cloud_transformed, -1)

    net = tf_util.conv2d(input_image, 128, [1,3],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv1', bn_decay=bn_decay)

#    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp1')

    net = tf_util.conv2d(net, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv2', bn_decay=bn_decay)
    # Must be 64; otherwise the second transform (feature_transform_net, K=64) has to change.
#    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp2')

    with tf.variable_scope('transform_net2') as sc:
        transform = feature_transform_net(net, is_training, bn_decay, K=64)
    end_points['transform'] = transform
    net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), transform)
    net_transformed = tf.expand_dims(net_transformed, [2])

#    net = tf_util.max_pool2d(net_transformed, [num_point,1], padding = 'VALID', scope = 'maxp')

    net = tf_util.conv2d(net_transformed, 512, [1, 1],
                         padding='VALID', stride=[1, 1],
                         bn=True, is_training=is_training,
                         scope='conv3', bn_decay=bn_decay)

#    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp3')

    net = tf_util.conv2d(net, 64, [1, 1],
                         padding='VALID', stride=[1, 1],
                         bn=True, is_training=is_training,
                         scope='conv4', bn_decay=bn_decay)

#    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp4')

    net = tf_util.conv2d(net, 512, [1, 1],
                         padding='VALID', stride=[1, 1],
                         bn=True, is_training=is_training,
                         scope='conv5', bn_decay=bn_decay)

#    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp5')

    net = tf_util.max_pool2d(net, [num_point, 1],
                             padding='VALID', scope='maxpool')

    net = tf_util.conv2d(net, 512, [1, 1],
                         padding='VALID', stride=[1, 1],
                         bn=True, is_training=is_training,
                         scope='conv6', bn_decay=bn_decay)
#    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp6')

    net = tf_util.conv2d(net, 512, [1, 1],
                         padding='VALID', stride=[1, 1],
                         bn=True, is_training=is_training,
                         scope='conv7', bn_decay=bn_decay)

    net = tf_util.conv2d(net, 512, [1, 1],
                         padding='VALID', stride=[1, 1],
                         bn=True, is_training=is_training,
                         scope='conv8', bn_decay=bn_decay)


    # Symmetric function: max pooling
#    net = tf_util.max_pool2d(net, [num_point, 1],
#                             padding = 'VALID', scope = 'maxpool')
    net = tf.reshape(net, [batch_size, -1])

    net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training,
                                  scope='fc1', bn_decay=bn_decay)

    net = tf_util.dropout(net, keep_prob=0.1, is_training=is_training, scope='dp7')

#    net = tf_util.dropout(net, keep_prob=0.3, is_training=is_training, scope='dp10')

    net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training,
                                  scope='fcbos', bn_decay=bn_decay)

    net = tf_util.dropout(net, keep_prob=0.1, is_training=is_training, scope='dp11')

#    net = tf_util.fully_connected(net, 128, bn=True, is_training=is_training,
#                                  scope='fcbos2', bn_decay=bn_decay)

#    net = tf_util.dropout(net, keep_prob=0.2, is_training=is_training, scope='dp12')

    net = tf_util.fully_connected(net, 4, activation_fn=None, scope='fc3')

#    pdb.set_trace()
#    print(net)
#    changeLoggerNet(net)
    return net, end_points
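The Bx4 tensor returned here is left unnormalized (activation_fn=None), so a loss is typically attached outside the model function. A minimal sketch of that wiring, assuming the model above produces Bx4 logits for BxNx3 inputs; the labels_pl placeholder, sizes, and optimizer settings are illustrative, not part of the original code:

import tensorflow as tf

BATCH_SIZE, NUM_POINT = 32, 1024  # illustrative sizes

pointclouds_pl = tf.placeholder(tf.float32, shape=(BATCH_SIZE, NUM_POINT, 3))
is_training_pl = tf.placeholder(tf.bool, shape=())
labels_pl = tf.placeholder(tf.int32, shape=(BATCH_SIZE,))  # hypothetical label placeholder

logits, end_points = get_model(pointclouds_pl, is_training_pl, bn_decay=None)  # Bx4 logits

# Cross-entropy on the raw logits; a regularizer on end_points['transform']
# could be added on top, as in the original PointNet.
loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels_pl, logits=logits))
train_op = tf.train.AdamOptimizer(learning_rate=1e-3).minimize(loss)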
Exemplo n.º 28
0
def get_model_groupdata(group_data, mask, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx4, output Bx40 """
    batch_size = group_data.get_shape()[0].value  #32
    num_point = group_data.get_shape()[1].value  #1024

    with tf.variable_scope('transform_net1') as sc:
        transform = input_transform_net_edge_net(group_data,
                                                 mask,
                                                 is_training,
                                                 bn_decay,
                                                 K=4)

    group_data_transformed = tf.matmul(
        tf.reshape(group_data, [batch_size, -1, 4]), transform)
    group_data_transformed = tf.reshape(group_data_transformed,
                                        [batch_size, num_point, -1])  # B N K C
    #input_image = tf.expand_dims(group_data_transformed, -1)
    input_image = group_data_transformed
    with tf.variable_scope('edge_net1') as sc:
        net, kernel, max_index_local_neighbor, masked_result = edge_net.edge_unit(
            input_image,
            mask,
            'max',
            config.neighbor_num,
            32,
            scope='conv1',
            bn=True,
            is_training=is_training,
            bn_decay=bn_decay)

    net = tf_util.conv2d(net,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv1',
                         bn_decay=bn_decay)

    with tf.variable_scope('transform_net2') as sc:
        transform = feature_transform_net(net, is_training, bn_decay, K=64)

    net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), transform)
    net_transformed = tf.expand_dims(net_transformed, [2])

    net = tf_util.conv2d(net_transformed,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv2',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv3',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         1024, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv4',
                         bn_decay=bn_decay)

    # Symmetric function: max pooling
    max_index_neighbor = tf.squeeze(tf.argmax(net, 1))
    net = tf_util.max_pool2d(net, [num_point, 1],
                             padding='VALID',
                             scope='maxpool')

    net = tf.reshape(net, [batch_size, -1])
    net = tf_util.fully_connected(net,
                                  512,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc1',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.7,
                          is_training=is_training,
                          scope='dp1')
    net = tf_util.fully_connected(net,
                                  256,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc2',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.7,
                          is_training=is_training,
                          scope='dp2')

    return net, transform, max_index_neighbor, max_index_local_neighbor
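The max_index_neighbor returned above records, for each of the 1024 global-feature channels, the arg-max position along the point axis of the pre-pooling tensor (roughly BxNx1x1024). A small NumPy post-processing sketch, assuming the index tensor has already been evaluated in a session; the helper name is illustrative:

import numpy as np

def critical_indices(max_index_vals):
    """max_index_vals: (batch_size, 1024) array of arg-max positions along the
    point axis, one per global-feature channel. Returns, per sample, the unique
    positions that actually contribute to the pooled global feature."""
    return [np.unique(sample) for sample in max_index_vals]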
Exemplo n.º 29
0
def get_model(point_cloud, is_training, bn_decay=None, mask= None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}

    with tf.variable_scope('transform_net1') as sc:
        transform = input_transform_net(point_cloud, is_training, bn_decay, K=3)
    point_cloud_transformed = tf.matmul(point_cloud, transform)
    input_image = tf.expand_dims(point_cloud_transformed, -1)

    net = tf_util.conv2d(input_image, 64, [1,3],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv1', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv2', bn_decay=bn_decay)

    with tf.variable_scope('transform_net2') as sc:
        transform = feature_transform_net(net, is_training, bn_decay, K=64)

    end_points['transform'] = transform
    net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), transform)
    net_transformed = tf.expand_dims(net_transformed, [2])

    net = tf_util.conv2d(net_transformed, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv3', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv4', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 1024, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv5', bn_decay=bn_decay)

    # Symmetric function: max pooling
    # net = tf_util.max_pool2d(net, [num_point,1],
    #                          padding='VALID', scope='maxpool')
    net = tf_util.max_pool2d_dropout(net, [num_point,1],
                             padding='VALID', scope='maxpool')


    net = tf.reshape(net, [batch_size, -1])
    net = tf.multiply(net, mask, name="mask_for_feature_evaluation")
    # net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training,
    net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training,
                                  scope='fc1', bn_decay=bn_decay)

    # dropout left out here (keep at most a single dropout).
    # net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training,
    #                       scope='dp1')
    net = tf_util.fully_connected(net, 128, bn=True, is_training=is_training,
                                  scope='fc2', bn_decay=bn_decay)

    # dropout left out here (keep at most a single dropout)
    # net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training,
    #                       scope='dp2')
    net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')

    return net, end_points
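The tf.multiply on the pooled feature lets individual global-feature channels be switched off at evaluation time. A minimal sketch of how such a mask might be defined and fed, assuming max_pool2d_dropout yields the usual Bx1x1x1024 pooled shape so the reshaped feature is Bx1024; the placeholder name, sizes, and the choice of channels to zero are illustrative, not part of the original code:

import numpy as np
import tensorflow as tf

BATCH_SIZE, NUM_POINT = 32, 1024  # illustrative sizes

pointclouds_pl = tf.placeholder(tf.float32, shape=(BATCH_SIZE, NUM_POINT, 3))
is_training_pl = tf.placeholder(tf.bool, shape=())
# The pooled feature is Bx1024 (conv5 has 1024 channels), so the mask matches that shape.
mask_pl = tf.placeholder(tf.float32, shape=(BATCH_SIZE, 1024))

logits, end_points = get_model(pointclouds_pl, is_training_pl, mask=mask_pl)

# Zero out a block of global-feature channels to measure their contribution.
mask_value = np.ones((BATCH_SIZE, 1024), dtype=np.float32)
mask_value[:, :128] = 0.0
# feed_dict={pointclouds_pl: ..., is_training_pl: False, mask_pl: mask_value}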
Exemplo n.º 30
0
def get_model_ec(group_data, mask, is_training, bn_decay=None):
    # groupdata B N K*C
    batch_size = group_data.get_shape()[0].value
    num_point = group_data.get_shape()[1].value
    ec = econ.create_ec(group_data, mask)  # B N K ec_length
    ec_length = ec.get_shape()[3].value
    ec = tf.reshape(ec, [batch_size, num_point, -1])  # B N 9
    with tf.variable_scope('transform_net1_ec') as sc:
        transform = input_transform_net_edge_net(ec,
                                                 mask,
                                                 is_training,
                                                 bn_decay,
                                                 K=ec_length)

    ec_transformed = tf.matmul(tf.reshape(ec, [batch_size, -1, ec_length]),
                               transform)
    ec_transformed = tf.reshape(ec_transformed, [batch_size, num_point, -1])
    input_image = ec_transformed

    with tf.variable_scope('ec_net1') as sc:
        net, kernel, max_index_local_ec, masked_result = edge_net.edge_unit(
            input_image,
            mask,
            'max',
            config.neighbor_num,
            32,
            scope='conv1',
            bn=True,
            is_training=is_training,
            bn_decay=bn_decay)

    net = tf_util.conv2d(net,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv1',
                         bn_decay=bn_decay)

    with tf.variable_scope('transform_net2') as sc:
        transform = feature_transform_net(net, is_training, bn_decay, K=64)

    net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), transform)
    net_transformed = tf.expand_dims(net_transformed, [2])

    net = tf_util.conv2d(net_transformed,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv2',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv3',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         1024, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv4',
                         bn_decay=bn_decay)

    max_index_ec = tf.squeeze(tf.argmax(net, 1))
    # Symmetric function: max pooling
    net = tf_util.max_pool2d(net, [num_point, 1],
                             padding='VALID',
                             scope='maxpool')

    net = tf.reshape(net, [batch_size, -1])
    net = tf_util.fully_connected(net,
                                  512,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc1',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.7,
                          is_training=is_training,
                          scope='dp1')
    net = tf_util.fully_connected(net,
                                  256,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc2',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.7,
                          is_training=is_training,
                          scope='dp2')

    return net, transform, max_index_ec, max_index_local_ec, masked_result
Exemplo n.º 31
0
def get_model(point_cloud, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output BxNx50 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}

    with tf.variable_scope('transform_net1') as sc:
        transform = input_transform_net(point_cloud,
                                        is_training,
                                        bn_decay,
                                        K=3)
    point_cloud_transformed = tf.matmul(point_cloud, transform)
    input_image = tf.expand_dims(point_cloud_transformed, -1)

    net = tf_util.conv2d(input_image,
                         64, [1, 3],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv1',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv2',
                         bn_decay=bn_decay)

    with tf.variable_scope('transform_net2') as sc:
        transform = feature_transform_net(net, is_training, bn_decay, K=64)
    end_points['transform'] = transform
    net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), transform)
    point_feat = tf.expand_dims(net_transformed, [2])
    print(point_feat)

    net = tf_util.conv2d(point_feat,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv3',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv4',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         1024, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv5',
                         bn_decay=bn_decay)
    global_feat = tf_util.max_pool2d(net, [num_point, 1],
                                     padding='VALID',
                                     scope='maxpool')
    print(global_feat)

    global_feat_expand = tf.tile(global_feat, [1, num_point, 1, 1])
    concat_feat = tf.concat([point_feat, global_feat_expand], axis=3)
    print(concat_feat)

    net = tf_util.conv2d(concat_feat,
                         512, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv6',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         256, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv7',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv8',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv9',
                         bn_decay=bn_decay)

    net = tf_util.conv2d(net,
                         50, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         activation_fn=None,
                         scope='conv10')
    net = tf.squeeze(net, [2])  # BxNxC

    return net, end_points
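Since this variant returns per-point logits (BxNx50), a segmentation loss pairs each point with an integer part label. A minimal sketch, assuming a BxN integer label placeholder; seg_labels_pl and the sizes are illustrative, not part of the original code:

import tensorflow as tf

BATCH_SIZE, NUM_POINT = 32, 2048  # illustrative sizes

pointclouds_pl = tf.placeholder(tf.float32, shape=(BATCH_SIZE, NUM_POINT, 3))
is_training_pl = tf.placeholder(tf.bool, shape=())
seg_labels_pl = tf.placeholder(tf.int32, shape=(BATCH_SIZE, NUM_POINT))  # hypothetical labels

logits, end_points = get_model(pointclouds_pl, is_training_pl)  # BxNx50

# The op accepts BxN labels against BxNx50 logits and returns a BxN loss map.
per_point_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
    labels=seg_labels_pl, logits=logits)
seg_loss = tf.reduce_mean(per_point_loss)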
Exemplo n.º 32
0
def get_model_point(point_cloud, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = point_cloud.get_shape()[0].value  #32
    num_point = point_cloud.get_shape()[1].value  #1024

    with tf.variable_scope('transform_net1') as sc:
        transform = input_transform_net(point_cloud,
                                        is_training,
                                        bn_decay,
                                        K=4)

    point_cloud_transformed = tf.matmul(point_cloud, transform)
    input_image = tf.expand_dims(point_cloud_transformed, -1)

    net = tf_util.conv2d(input_image,
                         64, [1, 4],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv1',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv2',
                         bn_decay=bn_decay)

    with tf.variable_scope('transform_net2') as sc:
        transform = feature_transform_net(net, is_training, bn_decay, K=64)

    net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), transform)
    net_transformed = tf.expand_dims(net_transformed, [2])

    net = tf_util.conv2d(net_transformed,
                         64, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv3',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         128, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv4',
                         bn_decay=bn_decay)
    net = tf_util.conv2d(net,
                         1024, [1, 1],
                         padding='VALID',
                         stride=[1, 1],
                         bn=True,
                         is_training=is_training,
                         scope='conv5',
                         bn_decay=bn_decay)

    # Symmetric function: max pooling
    max_index = tf.squeeze(tf.argmax(net, 1))
    net = tf_util.max_pool2d(net, [num_point, 1],
                             padding='VALID',
                             scope='maxpool')

    net = tf.reshape(net, [batch_size, -1])
    net = tf_util.fully_connected(net,
                                  512,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc1',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.7,
                          is_training=is_training,
                          scope='dp1')
    net = tf_util.fully_connected(net,
                                  256,
                                  bn=True,
                                  is_training=is_training,
                                  scope='fc2',
                                  bn_decay=bn_decay)
    net = tf_util.dropout(net,
                          keep_prob=0.7,
                          is_training=is_training,
                          scope='dp2')

    return net, transform, max_index
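This variant stops at a 256-dimensional global feature rather than producing class logits. A minimal sketch of a classification head that could sit on top of the returned tensor; the 40-way head, the scope name 'cls_head', and the placeholder sizes are assumptions, not part of the original code:

import tensorflow as tf

BATCH_SIZE, NUM_POINT = 32, 1024  # illustrative sizes; this variant expects 4-D points (K=4)

pointclouds_pl = tf.placeholder(tf.float32, shape=(BATCH_SIZE, NUM_POINT, 4))
is_training_pl = tf.placeholder(tf.bool, shape=())

feat, transform, max_index = get_model_point(pointclouds_pl, is_training_pl)  # feat: Bx256

# Hypothetical classification head added outside the model function.
logits = tf_util.fully_connected(feat, 40, activation_fn=None, scope='cls_head')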