def se_net_res(input_x, ratio, layer_name, is_training, bn_decay):
    with tf.name_scope(layer_name):
        out_dim = input_x.get_shape()[-1].value
        # Squeeze: global average pool to one descriptor per channel,
        # reshaped to B x 1 x C so it can pass through the 1-D conv "FC" layers.
        squeeze = global_avg_pool(input_x)
        squeeze = tf.reshape(squeeze, [-1, 1, out_dim])
        excitation = tf_util.conv1d(squeeze,
                                    out_dim // ratio,  # integer division: the channel count must be an int
                                    1,
                                    padding='SAME',
                                    bn=True,
                                    is_training=is_training,
                                    scope=layer_name + 'fc1',
                                    bn_decay=bn_decay)
        excitation = tf.nn.relu(excitation)
        excitation = tf_util.conv1d(excitation,
                                    out_dim,
                                    1,
                                    padding='SAME',
                                    bn=True,
                                    is_training=is_training,
                                    scope=layer_name + 'fc2',
                                    bn_decay=bn_decay)
        excitation = tf.nn.sigmoid(excitation)
        # Excitation: per-channel gates broadcast over the spatial dims,
        # followed by a residual connection back to the input.
        excitation = tf.reshape(excitation, [-1, 1, 1, out_dim])
        scale = input_x * excitation
        scale = scale + input_x
        return scale
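A minimal usage sketch for the block above, assuming a TF 1.x graph and a 4-D BxHxWxC feature map; all shapes and names here are illustrative, not part of the original code:

import tensorflow as tf  # TF 1.x graph-mode API assumed

feature_map = tf.placeholder(tf.float32, [8, 32, 32, 64])  # B x H x W x C
is_training_pl = tf.placeholder(tf.bool, shape=())

# Bottleneck ratio 4 -> 16 squeeze channels; the residual add happens inside the block.
recalibrated = se_net_res(feature_map, ratio=4, layer_name='se1',
                          is_training=is_training_pl, bn_decay=0.9)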
Example #2
def get_model(point_cloud, cls_label, is_training, bn_decay=None):
    """ Part segmentation PointNet, input is BxNx6 (XYZ NormalX NormalY NormalZ), output Bx50 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}

    l0_xyz = tf.slice(point_cloud, [0,0,0], [-1,-1,3])
    l0_points = tf.slice(point_cloud, [0,0,3], [-1,-1,3])

    cls_label_one_hot = tf.one_hot(cls_label, depth=NUM_CATEGORIES, on_value=1.0, off_value=0.0)
    cls_label_one_hot = tf.reshape(cls_label_one_hot, [batch_size, 1, NUM_CATEGORIES])
    cls_label_one_hot = tf.tile(cls_label_one_hot, [1,num_point,1])

    # Set Abstraction layers
    l1_xyz, l1_points = pointnet_sp2_module(l0_xyz, l0_points, npoint=512, mlp1=[32,32,64], mlp2=[64,64], is_training=is_training, bn_decay=bn_decay, scope='sp_layer1')
    l2_xyz, l2_points = pointnet_sp2_module(l1_xyz, l1_points, npoint=128, mlp1=[64,64,128], mlp2=[128,128], is_training=is_training, bn_decay=bn_decay, scope='sp_layer2')
    l3_xyz, l3_points = pointnet_sp2_module(l2_xyz, l2_points, npoint=32, mlp1=[128,128,256], mlp2=[256,256], is_training=is_training, bn_decay=bn_decay, scope='sp_layer3')
    _, global_points = pointnet_sp2_module(l3_xyz, l3_points, npoint=1, mlp1=[256,256,512], mlp2=[512,1024], is_training=is_training, bn_decay=bn_decay, k=32, scope='sp_layer4', group_all=True)

    # Global feature vector is concatenated to every point in the last fp layer
    l0_global_points = tf.tile(global_points, [1,l0_points.get_shape()[1].value,1])

    # Feature Propagation layers
    l2_points = pointnet_fp_module(l2_xyz, l3_xyz, tf.concat([l2_points,l2_xyz],axis=-1), l3_points, [512,256,256], is_training, bn_decay, scope='fp_layer1')
    l1_points = pointnet_fp_module(l1_xyz, l2_xyz, tf.concat([l1_points,l1_xyz],axis=-1), l2_points, [256,128,128], is_training, bn_decay, scope='fp_layer2')
    l0_points = pointnet_fp_module(l0_xyz, l1_xyz, tf.concat([cls_label_one_hot,l0_xyz,l0_points,l0_global_points],axis=-1), l1_points, [128,128,128], is_training, bn_decay, scope='fp_layer3')

    # FC layers
    net = tf_util.conv1d(l0_points, 128, 1, padding='VALID', bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
    end_points['feats'] = net 
    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp1')
    net = tf_util.conv1d(net, 50, 1, padding='VALID', activation_fn=None, scope='fc2')

    return net, end_points
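A graph-construction sketch for the model above; the batch and point counts are hypothetical, and NUM_CATEGORIES is assumed to be defined in the surrounding module (e.g. 16 for ShapeNet part segmentation):

import tensorflow as tf  # TF 1.x

batch_size, num_point = 16, 2048
point_cloud_pl = tf.placeholder(tf.float32, [batch_size, num_point, 6])  # XYZ + normals
cls_label_pl = tf.placeholder(tf.int32, [batch_size])                    # per-shape category id
is_training_pl = tf.placeholder(tf.bool, shape=())

# net: B x N x 50 per-point part logits; end_points['feats']: B x N x 128
net, end_points = get_model(point_cloud_pl, cls_label_pl, is_training_pl, bn_decay=None)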
Example #3
def get_model(point_cloud, is_training, bn_decay=None):
    """ Part segmentation PointNet, input is BxNx6 (XYZ NormalX NormalY NormalZ), output Bx50 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    l0_xyz = tf.slice(point_cloud, [0,0,0], [-1,-1,3])
    l0_points = tf.slice(point_cloud, [0,0,3], [-1,-1,3])

    # Set Abstraction layers
    l1_xyz, l1_points, l1_indices = pointnet_sa_module(l0_xyz, l0_points, npoint=512, radius=0.2, nsample=64, mlp=[64,64,128], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer1')
    l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz, l1_points, npoint=128, radius=0.4, nsample=64, mlp=[128,128,256], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer2')
    l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz, l2_points, npoint=None, radius=None, nsample=None, mlp=[256,512,1024], mlp2=None, group_all=True, is_training=is_training, bn_decay=bn_decay, scope='layer3')

    # Feature Propagation layers
    l2_points = pointnet_fp_module(l2_xyz, l3_xyz, l2_points, l3_points, [256,256], is_training, bn_decay, scope='fa_layer1')
    l1_points = pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points, [256,128], is_training, bn_decay, scope='fa_layer2')
    l0_points = pointnet_fp_module(l0_xyz, l1_xyz, tf.concat([l0_xyz,l0_points],axis=-1), l1_points, [128,128,128], is_training, bn_decay, scope='fa_layer3')

    # FC layers
    net = tf_util.conv1d(l0_points, 128, 1, padding='VALID', bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
    end_points['feats'] = net 
    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp1')
    net = tf_util.conv1d(net, 50, 1, padding='VALID', activation_fn=None, scope='fc2')

    return net, end_points
def get_model(point_cloud, cls_label, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    l0_xyz = tf.slice(point_cloud, [0,0,0], [-1,-1,3])
    l0_points = tf.slice(point_cloud, [0,0,3], [-1,-1,3])

    # Set abstraction layers
    l1_xyz, l1_points = pointnet_sa_module_msg(l0_xyz, l0_points, 512, [0.1,0.2,0.4], [32,64,128], [[32,32,64], [64,64,128], [64,96,128]], is_training, bn_decay, scope='layer1')
    l2_xyz, l2_points = pointnet_sa_module_msg(l1_xyz, l1_points, 128, [0.4,0.8], [64,128], [[128,128,256],[128,196,256]], is_training, bn_decay, scope='layer2')
    l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz, l2_points, npoint=None, radius=None, nsample=None, mlp=[256,512,1024], mlp2=None, group_all=True, is_training=is_training, bn_decay=bn_decay, scope='layer3')

    # Feature propagation layers
    l2_points = pointnet_fp_module(l2_xyz, l3_xyz, l2_points, l3_points, [256,256], is_training, bn_decay, scope='fa_layer1')
    l1_points = pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points, [256,128], is_training, bn_decay, scope='fa_layer2')

    cls_label_one_hot = tf.one_hot(cls_label, depth=NUM_CATEGORIES, on_value=1.0, off_value=0.0)
    cls_label_one_hot = tf.reshape(cls_label_one_hot, [batch_size, 1, NUM_CATEGORIES])
    cls_label_one_hot = tf.tile(cls_label_one_hot, [1,num_point,1])
    l0_points = pointnet_fp_module(l0_xyz, l1_xyz, tf.concat([cls_label_one_hot, l0_xyz, l0_points],axis=-1), l1_points, [128,128], is_training, bn_decay, scope='fp_layer3')

    # FC layers
    net = tf_util.conv1d(l0_points, 128, 1, padding='VALID', bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
    end_points['feats'] = net 
    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp1')
    net = tf_util.conv1d(net, 50, 1, padding='VALID', activation_fn=None, scope='fc2')

    return net, end_points
Example #5
def get_stage_3(pointclouds_pl, field_pl, is_training, bn_decay=None):
    batch_size = pointclouds_pl.get_shape()[0].value
    scope = 'point'
    with tf.variable_scope(scope, reuse=tf.AUTO_REUSE) as sc:
        end_points1, feat1 = get_feature(pointclouds_pl, is_training, bn_decay)
    field1 = field_pl[:, 0, :, :]
    field2 = field_pl[:, 1, :, :]
    field3 = field_pl[:, 2, :, :]
    # The three field branches share weights via AUTO_REUSE on the same scope.
    with tf.variable_scope('field', reuse=tf.AUTO_REUSE):
        end_points2_1, feat2_1 = get_feature(field1, is_training, bn_decay)
    with tf.variable_scope('field', reuse=tf.AUTO_REUSE):
        end_points2_2, feat2_2 = get_feature(field2, is_training, bn_decay)
    with tf.variable_scope('field', reuse=tf.AUTO_REUSE):
        end_points2_3, feat2_3 = get_feature(field3, is_training, bn_decay)

    feat2_1 = tf.expand_dims(feat2_1, axis=1)
    feat2_2 = tf.expand_dims(feat2_2, axis=1)
    feat2_3 = tf.expand_dims(feat2_3, axis=1)
    feat2 = tf.concat([feat2_1, feat2_2, feat2_3], axis=1)
    feat1 = tf.expand_dims(feat1, axis=1)
    feat = tf.concat([feat1, feat2], axis=1)
    feat = tf.reduce_max(feat, axis=1)
    feat_proposal = tf_util.conv1d(feat,
                                   128,
                                   1,
                                   padding='VALID',
                                   bn=True,
                                   is_training=is_training,
                                   scope='stage3/fc1_1',
                                   bn_decay=bn_decay)
    pred_proposal = tf_util.conv1d(feat_proposal,
                                   2,
                                   1,
                                   padding='VALID',
                                   bn=True,
                                   is_training=is_training,
                                   scope='stage3/fc1_2',
                                   bn_decay=bn_decay)
    feat_dof = tf_util.conv1d(feat,
                              128,
                              1,
                              padding='VALID',
                              bn=True,
                              is_training=is_training,
                              scope='stage3/fc2_1',
                              bn_decay=bn_decay)
    pred_dof_regression = tf_util.conv1d(feat_dof,
                                         6,
                                         4096,  # kernel spans the full point dimension (assumes N = 4096)
                                         padding='VALID',
                                         bn=True,
                                         is_training=is_training,
                                         scope='stage3/fc2_2',
                                         bn_decay=bn_decay)
    return pred_proposal, pred_dof_regression
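A shape-level call sketch for get_stage_3, assuming get_feature is defined elsewhere in the module; the 4096-wide kernel in stage3/fc2_2 suggests N = 4096, and the per-field channel count below is illustrative:

import tensorflow as tf  # TF 1.x

B, N = 8, 4096
pointclouds_pl = tf.placeholder(tf.float32, [B, N, 3])
field_pl = tf.placeholder(tf.float32, [B, 3, N, 3])  # three stacked per-point fields
is_training_pl = tf.placeholder(tf.bool, shape=())

pred_proposal, pred_dof_regression = get_stage_3(pointclouds_pl, field_pl,
                                                 is_training_pl, bn_decay=None)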
def get_model(point_cloud, is_training, num_class, bn_decay=None):
    """ Semantic segmentation PointNet, input is BxNx5, output Bxnum_class """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    l0_xyz = tf.slice(point_cloud, [0,0,0], [-1,-1,3])
    l0_points = tf.slice(point_cloud, [0,0,3], [-1,-1,2])
    end_points['l0_xyz'] = l0_xyz
    

    # Layer 1
    l1_xyz, l1_points = pointnet_sa_module_msg(l0_xyz, l0_points, 1024, [0.1, 0.4], [16, 128],[[32,64],[64,  128],[64,128]], is_training, bn_decay=bn_decay, scope='layer1', use_nchw=True)
    l2_xyz, l2_points = pointnet_sa_module_msg(l1_xyz, l1_points, 256, [0.1, 0.5], [32,128],[[64,128], [128,256], [128,256]], is_training, bn_decay=bn_decay, scope='layer2')
    l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz, l2_points, 128, radius=0.6, nsample=64, mlp=[256,128,256],mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer3')
    l4_xyz, l4_points, l4_indices = pointnet_sa_module(l3_xyz, l3_points, 16, radius=0.8, nsample=32, mlp=[256,256,512],mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer4')

    # Feature Propagation layers
    l3_points = pointnet_fp_module(l3_xyz, l4_xyz, l3_points, l4_points, [256,256], is_training, bn_decay, scope='fa_layer1')
    l2_points = pointnet_fp_module(l2_xyz, l3_xyz, l2_points, l3_points, [256,256], is_training, bn_decay, scope='fa_layer2')
    l1_points = pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points, [256,128], is_training, bn_decay, scope='fa_layer3')
    l0_points = pointnet_fp_module(l0_xyz, l1_xyz, tf.concat([l0_xyz,l0_points],axis=-1), l1_points, [128,128,128], is_training, bn_decay, scope='fa_layer4')

    # FC layers
    net = tf_util.conv1d(l0_points, 128, 1, padding='VALID', bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
    end_points['feats'] = net 
    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp1')
    net = tf_util.conv1d(net, num_class, 1, padding='VALID', activation_fn=None, scope='fc2')

    return net, end_points
Example #7
def get_decoder(embedding, is_training, scope='pointnet2_decoder', bn_decay=None, bn=True, end_points = {}):
    with tf.name_scope(scope) as sc:
        l2_xyz = end_points['l2_xyz'] 
        l3_xyz = end_points['l3_xyz'] 
        l1_xyz = end_points['l1_xyz'] 
        l0_xyz = end_points['l0_xyz'] 
        l2_points = end_points['l2_points'] 
        l3_points = end_points['l3_points'] 
        l1_points = end_points['l1_points'] 
        l0_points = end_points['l0_points'] 

        batch_size = embedding.get_shape()[0].value
        # net = tf_util.fully_connected(embedding, 512, bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
        # net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training, scope='fc2', bn_decay=bn_decay)
        # net = tf_util.fully_connected(net, 1024*3, activation_fn=None, scope='fc3')
        # pc_fc = tf.reshape(net, (batch_size, -1, 3))

        embedding = tf.expand_dims(embedding, axis=1)
        l3_points = tf.concat([embedding, l3_points], axis = -1)

        # Feature Propagation layers
        l2_points = pointnet_fp_module(l2_xyz, l3_xyz, l2_points, l3_points, [256,256], is_training, bn_decay, scope='fa_layer1')
        l1_points = pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points, [256,128], is_training, bn_decay, scope='fa_layer2')
        l0_points = pointnet_fp_module(l0_xyz, l1_xyz, l0_points, l1_points, [128,128,128], is_training, bn_decay, scope='fa_layer3')

        # FC layers
        net = tf_util.conv1d(l0_points, 128, 1, padding='VALID', bn=True, is_training=is_training, scope='decoder_fc1', bn_decay=bn_decay)
        net = tf_util.conv1d(net, 3, 1, padding='VALID', bn=False, is_training=is_training, scope='decoder_fc2', bn_decay=None, activation_fn=None)  # consume the decoder_fc1 output rather than l0_points, so fc1 is actually used
        # net = tf_util.conv2d_transpose(net, 3, kernel_size=[1,1], stride=[1,1], padding='VALID', scope='fc2', activation_fn=None)

        reconst_pc = tf.reshape(net, [batch_size, -1, 3])

    return reconst_pc
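A hedged sketch of driving this decoder, assuming a matching PointNet++ encoder (here a hypothetical get_encoder) has filled end_points with the l0..l3 xyz/feature tensors that get_decoder reads back:

import tensorflow as tf  # TF 1.x

point_cloud_pl = tf.placeholder(tf.float32, [16, 2048, 3])
is_training_pl = tf.placeholder(tf.bool, shape=())

# get_encoder is hypothetical: any encoder that records l0_xyz..l3_xyz and
# l0_points..l3_points in end_points fits the contract assumed here.
embedding, end_points = get_encoder(point_cloud_pl, is_training_pl, bn_decay=None)

# reconst_pc: B x N x 3 reconstructed point coordinates
reconst_pc = get_decoder(embedding, is_training_pl, bn_decay=None, end_points=end_points)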
Example #8
def get_model(point_cloud, is_training, num_class, bn_decay=None):
    """ Semantic segmentation PointNet, input is BxNx3, output Bxnum_class """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    l0_xyz = point_cloud
    l0_points = None
    end_points['l0_xyz'] = l0_xyz

    # Layer 1
    l1_xyz, l1_points, l1_indices = pointnet_sa_module(l0_xyz, l0_points, npoint=1024, radius=0.1, nsample=32, mlp=[32,32,64], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer1')
    l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz, l1_points, npoint=256, radius=0.2, nsample=32, mlp=[64,64,128], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer2')
    l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz, l2_points, npoint=64, radius=0.4, nsample=32, mlp=[128,128,256], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer3')
    l4_xyz, l4_points, l4_indices = pointnet_sa_module(l3_xyz, l3_points, npoint=16, radius=0.8, nsample=32, mlp=[256,256,512], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer4')

    # Feature Propagation layers
    l3_points = pointnet_fp_module(l3_xyz, l4_xyz, l3_points, l4_points, [256,256], is_training, bn_decay, scope='fa_layer1')
    l2_points = pointnet_fp_module(l2_xyz, l3_xyz, l2_points, l3_points, [256,256], is_training, bn_decay, scope='fa_layer2')
    l1_points = pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points, [256,128], is_training, bn_decay, scope='fa_layer3')
    l0_points = pointnet_fp_module(l0_xyz, l1_xyz, l0_points, l1_points, [128,128,128], is_training, bn_decay, scope='fa_layer4')

    # FC layers
    net = tf_util.conv1d(l0_points, 128, 1, padding='VALID', bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
    end_points['feats'] = net 
    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp1')
    net = tf_util.conv1d(net, num_class, 1, padding='VALID', activation_fn=None, scope='fc2')

    return net, end_points
Example #9
def get_model(point_cloud, cls_label, is_training, bn_decay=None):
    """ Part segmentation PointNet, input is BxNx6 (XYZ NormalX NormalY NormalZ), output Bx50 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    l0_xyz = tf.slice(point_cloud, [0,0,0], [-1,-1,3])
    l0_points = tf.slice(point_cloud, [0,0,3], [-1,-1,3])

    # Set Abstraction layers
    l1_xyz, l1_points, l1_indices, xyz1_feature = LSA_layer(l0_xyz, l0_points, npoint=512, radius=0.2, nsample=64, mlp=[64,64,128], mlp2=[64,64], 
                                                                    group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer1')
    l2_xyz, l2_points, l2_indices, xyz2_feature = LSA_layer(l1_xyz, l1_points, npoint=128, radius=0.4, nsample=64, mlp=[128,128,256], mlp2=[64,64], 
                                                                    group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer2', xyz_feature=xyz1_feature)
    l3_xyz, l3_points, l3_indices, xyz3_feature = LSA_layer(l2_xyz, l2_points, npoint=None, radius=None, nsample=None, mlp=[256,512,1024], mlp2=[64,64], 
                                                                    group_all=True, is_training=is_training, bn_decay=bn_decay, scope='layer3', xyz_feature=xyz2_feature, end=True)
   
    l2_points = pointnet_fp_module(l2_xyz, l3_xyz, l2_points, l3_points, [256,256], is_training, bn_decay, scope='fa_layer1')
    l1_points = pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points, [256,128], is_training, bn_decay, scope='fa_layer2')
    cls_label_one_hot = tf.one_hot(cls_label, depth=NUM_CATEGORIES, on_value=1.0, off_value=0.0)
    cls_label_one_hot = tf.reshape(cls_label_one_hot, [batch_size, 1, NUM_CATEGORIES])
    cls_label_one_hot = tf.tile(cls_label_one_hot, [1,num_point,1])
    l0_points = pointnet_fp_module(l0_xyz, l1_xyz, tf.concat([cls_label_one_hot, l0_xyz, l0_points],axis=-1), l1_points, [128,128,128], is_training, bn_decay, scope='fa_layer3')

    # FC layers
    net = tf_util.conv1d(l0_points, 128, 1, padding='VALID', bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
    end_points['feats'] = net 
    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp1')
    net = tf_util.conv1d(net, 50, 1, padding='VALID', activation_fn=None, scope='fc2')

    return net, end_points
Example #10
def get_model(point_cloud, feature_cloud, color_cloud, s1, s2, s3, s4, g1, g2, g3, g4, c1, c2, c3, c4, t1, t2, t3, t4, is_training, num_class, use_color=0, bn_decay=None):
    """ Semantic segmentation TextureNet, input is BxNx3, output Bxnum_class """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    l0_xyz = point_cloud
    l0_points = None

    if use_color == 0:
        l0_points = None
    else:
        l0_points = feature_cloud
    if use_color == 2:
        l0_cloud = TextureConv(color_cloud, is_training, bn_decay)
        l0_points = tf.concat([l0_points,l0_cloud],axis=-1)

    # Layer 1
    l1_xyz, l1_points = texture_geodesic_conv(s1, g1, c1, t1, l0_xyz, l0_points, npoint=1024, radius=0.1, conv_radius=0.1, conv_mlp = None, nsample=32, mlp=[32, 32, 64], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer1', use_color=use_color)
    l2_xyz, l2_points = texture_geodesic_conv(s2, g2, c2, t2, l1_xyz, l1_points, npoint=256, radius=0.2, conv_radius=0.2, conv_mlp = None, nsample=32, mlp=[64, 64, 128], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer2', use_color=use_color)
    l3_xyz, l3_points = texture_geodesic_conv(s3, g3, c3, t3, l2_xyz, l2_points, npoint=64, radius=0.4, conv_radius=0.4, conv_mlp = None, nsample=32, mlp=[128, 128, 256], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer3', use_color=use_color)
    l4_xyz, l4_points = texture_geodesic_conv(s4, g4, c4, t4, l3_xyz, l3_points, npoint=16, radius=0.8, conv_radius=0.8, conv_mlp = None, nsample=32, mlp=[256,256,512], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer4', use_color=use_color)

    # Feature Propagation layers
    l3_points = texture_geodesic_tconv(l3_xyz, l4_xyz, l3_points, l4_points, [256,256], is_training, bn_decay, scope='fa_layer1')
    l2_points = texture_geodesic_tconv(l2_xyz, l3_xyz, l2_points, l3_points, [256,256], is_training, bn_decay, scope='fa_layer2')
    l1_points = texture_geodesic_tconv(l1_xyz, l2_xyz, l1_points, l2_points, [256,128], is_training, bn_decay, scope='fa_layer3')
    l0_points = texture_geodesic_tconv(l0_xyz, l1_xyz, l0_points, l1_points, [128,128,128], is_training, bn_decay, scope='fa_layer4')

    # FC layers
    net = tf_util.conv1d(l0_points, 128, 1, padding='VALID', bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp1')
    net = tf_util.conv1d(net, num_class, 1, padding='VALID', activation_fn=None, scope='fc2')

    return net
Example #11
def get_model(point_cloud, is_training, num_class, bn_decay=None):
    """ Semantic segmentation PointNet, input is BxNx3, output Bxnum_class """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    l0_xyz = point_cloud
    l0_points = None
    end_points['l0_xyz'] = l0_xyz

    # Layer 1
    l1_xyz, l1_points, l1_indices = pointnet_sa_module(l0_xyz, l0_points, npoint=1024, radius=0.1, nsample=32, mlp=[32,32,64], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer1')
    l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz, l1_points, npoint=256, radius=0.2, nsample=32, mlp=[64,64,128], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer2')
    l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz, l2_points, npoint=64, radius=0.4, nsample=32, mlp=[128,128,256], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer3')
    l4_xyz, l4_points, l4_indices = pointnet_sa_module(l3_xyz, l3_points, npoint=16, radius=0.8, nsample=32, mlp=[256,256,512], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer4')

    # Feature Propagation layers
    l3_points = pointnet_fp_module(l3_xyz, l4_xyz, l3_points, l4_points, [256,256], is_training, bn_decay, scope='fa_layer1')
    l2_points = pointnet_fp_module(l2_xyz, l3_xyz, l2_points, l3_points, [256,256], is_training, bn_decay, scope='fa_layer2')
    l1_points = pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points, [256,128], is_training, bn_decay, scope='fa_layer3')
    l0_points = pointnet_fp_module(l0_xyz, l1_xyz, l0_points, l1_points, [128,128,128], is_training, bn_decay, scope='fa_layer4')

    # FC layers
    net = tf_util.conv1d(l0_points, 128, 1, padding='VALID', bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
    end_points['feats'] = net 
    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp1')
    net = tf_util.conv1d(net, num_class, 1, padding='VALID', activation_fn=None, scope='fc2')

    return net, end_points
Example #12
 def get_region_proposal_net(self, point_feats, is_training, bn_decay,
                             end_points):
     batch_size = point_feats.get_shape()[0].value
     npoints = point_feats.get_shape()[1].value
     # xyz is not used
     point_feats = tf.slice(point_feats, [0, 0, 3],
                            [-1, -1, -1])  # (B, N, D)
     # FC layers
     net = tf_util.conv1d(point_feats,
                          256,
                          1,
                          padding='VALID',
                          bn=True,
                          is_training=is_training,
                          scope='rp-conv1d-fc1',
                          bn_decay=bn_decay)
     net = tf_util.dropout(net,
                           keep_prob=0.5,
                           is_training=is_training,
                           scope='rp-dp1')
     #net = tf_util.conv1d(net, 256, 1, padding='VALID', bn=True,
     #    is_training=is_training, scope='rp-conv1d-fc2', bn_decay=bn_decay)
     #net = tf_util.dropout(net, keep_prob=0.5,
     #    is_training=is_training, scope='rp-dp2')
     output = tf_util.conv1d(net,
                             NUM_CENTER_BIN * 2 * 2 + 1 +
                             NUM_HEADING_BIN * 2 + NUM_SIZE_CLUSTER * 4,
                             1,
                             padding='VALID',
                             activation_fn=None,
                             scope='rp-conv1d-fc-out')
     end_points['proposals'] = output
     return output
Example #13
def get_model(point_cloud, is_training, bn_decay=None, num_class = NUM_CLASSES):
    """ Part segmentation PointNet, input is BxNx3 (XYZ) """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    l0_xyz = tf.slice(point_cloud, [0,0,0], [-1,-1,3])
    l0_points = None

    # Set Abstraction layers
    l1_xyz, l1_points, l1_indices = pointnet_sa_module(l0_xyz, l0_points, npoint=512, radius=0.2, nsample=64, mlp=[64,64,128], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer1')
    l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz, l1_points, npoint=128, radius=0.4, nsample=64, mlp=[128,128,256], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer2')
    l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz, l2_points, npoint=None, radius=None, nsample=None, mlp=[256,512,1024], mlp2=None, group_all=True, is_training=is_training, bn_decay=bn_decay, scope='layer3')

    ###########SEGMENTATION BRANCH
    # Feature Propagation layers
    l2_points = pointnet_fp_module(l2_xyz, l3_xyz, l2_points, l3_points, [256,256], is_training, bn_decay, scope='fa_layer1')
    l1_points = pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points, [256,128], is_training, bn_decay, scope='fa_layer2')
    l0_points = pointnet_fp_module(l0_xyz, l1_xyz, l0_points, l1_points, [128,128,128], is_training, bn_decay, scope='fa_layer3')

    # FC layers
    net = tf_util.conv1d(l0_points, 128, 1, padding='VALID', bn=True, is_training=is_training, scope='seg_fc1', bn_decay=bn_decay)
    end_points['feats'] = net 
    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='seg_dp1')
    seg_pred = tf_util.conv1d(net, num_class, 1, padding='VALID', activation_fn=None, scope='seg_fc2')

    return seg_pred
Example #14
    def _dnn(self, points_in, is_training):
        """
        Central definition of the deep neural network: creates the sa and fp layers
        and handles dropout.
        :param points_in: tensor (batch x num_points x (3+num_feat)). input points (x,y,z,attr...)
        :param is_training: bool.
        :return: last layer of net
        """
        with tf.variable_scope('dnn'), tf.device('/gpu:0'):
            ln_xyz = [tf.slice(points_in, [0, 0, 0],
                               [-1, -1, 3])]  # point coordinates
            ln_feat_in = [tf.slice(points_in, [0, 0, 3],
                                   [-1, -1, -1])]  # point attributes
            ln_feat = [tf.slice(points_in, [0, 0, 3],
                                [-1, -1, -1])]  # point attributes

            if self.savefiles:
                self._ln_xyz = ln_xyz
                self._ln_feat_in = ln_feat_in
                self._ln_feat = ln_feat

            for depth, step_dict in enumerate(self.arch):  # set abstraction
                xyz, feat = self._pointnet_sa(step_dict, ln_xyz[depth],
                                              ln_feat[depth], is_training,
                                              'sa_layer_%d' % (depth + 1))
                ln_xyz.append(xyz)
                ln_feat.append(feat)
                ln_feat_in.append(feat)

            for depth, step_dict in enumerate(reversed(
                    self.arch)):  # feature propagation
                depth = len(self.arch) - depth
                feat = self._pointnet_fp(step_dict, ln_xyz[depth - 1],
                                         ln_xyz[depth], ln_feat[depth - 1],
                                         ln_feat[depth], is_training,
                                         'fp_layer_%d' % (depth - 1))
                ln_feat[depth - 1] = feat

            l0_feats = ln_feat[0]
            net = tf_util.conv1d(l0_feats,
                                 128,
                                 1,
                                 padding='VALID',
                                 bn=True,
                                 is_training=is_training,
                                 scope='fc1',
                                 bn_decay=None)
            net = tf_util.dropout(net,
                                  keep_prob=(1 - self.dropout),
                                  is_training=is_training,
                                  scope='dp1')
            net = tf_util.conv1d(net,
                                 self.num_classes,
                                 1,
                                 padding='VALID',
                                 activation_fn=None,
                                 scope='fc2',
                                 name='net')
            return net
Example #15
def JSPNet_PIFF_later(sem_input, ins_input, is_training, is_dist, bn_decay, num_point, num_embed, num_class):
    # original JSNet head, containing output generation
    # Adaptation
    ### ACF module test ###
    #sem_acf = ACFModule(sem_input,ins_input,1,1,128,1,1,is_training,is_dist,bn_decay)
    #sem_acf = tf_util.conv1d(sem_acf,128,1, padding='VALID', bn=True, is_training=is_training, is_dist=is_dist, scope='sem_cache_1', bn_decay=bn_decay)
    #net_ins_1 = ins_input + sem_acf#[b,4096,128]

    sem2ins = tf_util.conv1d(sem_input,128,1, padding='VALID', bn=True, is_training=is_training, is_dist=is_dist, scope='sem2ins_cache_1', bn_decay=bn_decay)
    net_ins_1 = ins_input + sem2ins

    net_ins_2 = tf.concat([ins_input, net_ins_1], axis=-1, name='net_ins_2_concat')#
    net_ins_atten = tf.sigmoid(tf.reduce_mean(net_ins_2, axis=-1, keep_dims=True, name='ins_reduce'), name='ins_atten') #[batch_size,4096,1]
    net_ins_3 = net_ins_2 * net_ins_atten#[b,4096,256]

    # Aggregation
    #ins_acf = ACFModule(ins_input,sem_input,1,1,128,1,1,is_training,is_dist,bn_decay,name='ins')
    #ins_acf = tf_util.conv1d(ins_acf, 128, 1, padding='VALID', bn=True, is_training=is_training, is_dist=is_dist, scope='ins_cache_1', bn_decay=bn_decay)
    ##[b,4096,128]
    #net_ins_cache_1 = tf.reduce_mean(ins_acf, axis=1, keep_dims=True, name='ins_cache_2')#[b,4096,128]
    #net_ins_cache_1 = tf.tile(net_ins_cache_1, [1, num_point, 1], name='ins_cache_tile')
    #net_sem_1 = sem_input + ins_acf#[b,4096,128]
    net_ins_3_ada = tf_util.conv1d(net_ins_3,128,1,padding='VALID',bn=True, is_training=is_training, is_dist=is_dist, scope='net_ins_3_ada', bn_decay=bn_decay)
    ins2sem = tf.reduce_mean(net_ins_3_ada, axis=1, keep_dims=True, name='ins2sem_cache_1')
    ins2sem = tf.tile(ins2sem, [1, num_point, 1], name='ins2sem_cache_tile')
    net_sem_1 = sem_input + ins2sem

    net_sem_2 = tf.concat([sem_input, net_sem_1], axis=-1, name='net_sem_2_concat')#[b,4096,256]
    net_sem_atten = tf.sigmoid(tf.reduce_mean(net_sem_2, axis=-1, keep_dims=True, name='sem_reduce'), name='sem_atten')#[b,4096,1]
    net_sem_3 = net_sem_2 * net_sem_atten#[b,4096,256]

    # ACF
    net_sem_4 = tf_util.conv1d(net_sem_3,128,1, padding='VALID', bn=True, is_training=is_training, is_dist=is_dist, scope='net_sem_4_ada', bn_decay=bn_decay)
    net_ins_4 = tf_util.conv1d(net_ins_3,128,1, padding='VALID', bn=True, is_training=is_training, is_dist=is_dist, scope='net_ins_4_ada', bn_decay=bn_decay)

    sem_acf = ACFModule(net_sem_4,net_ins_4,1,1,128,1,1,is_training,is_dist,bn_decay,name='sem',concat=True)
    ins_acf = ACFModule(net_ins_4,net_sem_4,1,1,128,1,1,is_training,is_dist,bn_decay,name='ins',concat=True)

    sem_acf = sem_acf + net_sem_3
    ins_acf = ins_acf + net_ins_3

    sem_acf = tf_util.conv1d(sem_acf,128,1, padding='VALID', bn=True, is_training=is_training, is_dist=is_dist, scope='sem_acf_ada', bn_decay=bn_decay)
    ins_acf = tf_util.conv1d(ins_acf,128,1, padding='VALID', bn=True, is_training=is_training, is_dist=is_dist, scope='ins_acf_ada', bn_decay=bn_decay)

    # Output
    net_ins_3 = tf_util.conv1d(ins_acf, 128, 1, padding='VALID', bn=True, is_training=is_training, is_dist=is_dist, scope='ins_fc2', bn_decay=bn_decay)
    #[b,4096,128]
    net_ins_4 = tf_util.dropout(net_ins_3, keep_prob=0.5, is_training=is_training, scope='ins_dp_4')
    net_ins_4 = tf_util.conv1d(net_ins_4, num_embed, 1, padding='VALID', activation_fn=None, is_dist=is_dist, scope='ins_fc5')
    #[b,4096,5]

    net_sem_3 = tf_util.conv1d(sem_acf, 128, 1, padding='VALID', bn=True, is_training=is_training, is_dist=is_dist, scope='sem_fc2', bn_decay=bn_decay)
    net_sem_4 = tf_util.dropout(net_sem_3, keep_prob=0.5, is_training=is_training, scope='sem_dp_4')
    net_sem_4 = tf_util.conv1d(net_sem_4, num_class, 1, padding='VALID', activation_fn=None, is_dist=is_dist, scope='sem_fc5')

    return net_ins_4, net_sem_4
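A call sketch for this fusion head, assuming the two 128-channel feature maps come from separate semantic and instance decoder branches (as in the joint model later in this section); num_class=13 is an illustrative S3DIS-style value:

import tensorflow as tf  # TF 1.x

# Stand-ins for the decoder outputs: B x num_point x 128 feature maps.
sem_input = tf.placeholder(tf.float32, [8, 4096, 128])
ins_input = tf.placeholder(tf.float32, [8, 4096, 128])
is_training_pl = tf.placeholder(tf.bool, shape=())

net_ins, net_sem = JSPNet_PIFF_later(sem_input, ins_input,
                                     is_training=is_training_pl, is_dist=False,
                                     bn_decay=None, num_point=4096,
                                     num_embed=5, num_class=13)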
Example #16
def get_model(point_cloud, cls_label, pp_idx, is_training, bn_decay=None):
    """ Part segmentation PointNet, input is BxNx6 (XYZ NormalX NormalY NormalZ), output Bx50 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}

    cls_label_one_hot = tf.one_hot(cls_label, depth=NUM_CATEGORIES, on_value=1.0, off_value=0.0)
    cls_label_one_hot = tf.reshape(cls_label_one_hot, [batch_size, 1, NUM_CATEGORIES])
    cls_label_one_hot = tf.tile(cls_label_one_hot, [1,num_point,1])

    l0_xyz = tf.slice(point_cloud, [0,0,0], [-1,-1,3])
    orig_points = tf.slice(point_cloud, [0,0,3], [-1,-1,3])

    # Set Abstraction layers
    l1_xyz, l1_points, _ = pointnet_sa_module(l0_xyz, orig_points, npoint=512, radius=0.2, nsample=64, mlp=[64,64,128], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer1')
    l2_xyz, l2_points, _ = pointnet_sa_module(l1_xyz, l1_points, npoint=128, radius=0.4, nsample=64, mlp=[128,128,256], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer2')

    # down sampling to one global point
    l3_xyz, l32_points, _ = pointnet_sa_module(l2_xyz, l2_points, npoint=None, radius=None, nsample=None, mlp=[256,512,1024], mlp2=None, group_all=True, is_training=is_training, bn_decay=bn_decay, scope='layer32')
    _, l31_points, _ = pointnet_sa_module(l1_xyz, l1_points, npoint=None, radius=None, nsample=None, mlp=[128,256,512,1024], mlp2=None, group_all=True, is_training=is_training, bn_decay=bn_decay, scope='layer31')
    _, l30_points, _ = pointnet_sa_module(l0_xyz, orig_points, npoint=None, radius=None, nsample=None, mlp=[64,128,256,512,1024], mlp2=None, group_all=True, is_training=is_training, bn_decay=bn_decay, scope='layer30')

    l3_points = l32_points + l31_points + l30_points

    # Feature propagation layers (feature propagation after each SA layer)
    fp2_points = pointnet_fp_module(l0_xyz, l3_xyz, tf.concat([cls_label_one_hot, l0_xyz, orig_points],axis=-1), l3_points, [256,128], is_training, bn_decay, scope='dfp_layer1')
    fp1_points = pointnet_fp_module(l0_xyz, l2_xyz, tf.concat([cls_label_one_hot, l0_xyz, orig_points],axis=-1), l2_points, [256,128], is_training, bn_decay, scope='dfp_layer2')
    fp0_points = pointnet_fp_module(l0_xyz, l1_xyz, tf.concat([cls_label_one_hot, l0_xyz, orig_points],axis=-1), l1_points, [128,128], is_training, bn_decay, scope='dfp_layer3')

    # Feature Propagation layers
    l2_points = pointnet_fp_module(l2_xyz, l3_xyz, l2_points, l3_points, [256,256], is_training, bn_decay, scope='fp_layer1')
    l1_points = pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points, [256,128], is_training, bn_decay, scope='fp_layer2')   
    l0_points = pointnet_fp_module(l0_xyz, l1_xyz, tf.concat([cls_label_one_hot,l0_xyz,orig_points],axis=-1), l1_points, [128,128,128], is_training, bn_decay, scope='fp_layer3')

    # Residual propagation
    rfp2_points = pointnet_fp_module(l0_xyz, l2_xyz, tf.concat([cls_label_one_hot, l0_xyz, orig_points],axis=-1), l2_points, [256,128], is_training, bn_decay, scope='rfp_layer1')
    rfp1_points = pointnet_fp_module(l0_xyz, l1_xyz, tf.concat([cls_label_one_hot, l0_xyz, orig_points],axis=-1), l1_points, [128,128], is_training, bn_decay, scope='rfp_layer2')
    rfp_points = rfp2_points + rfp1_points

    l0_points = fp2_points + fp1_points + fp0_points + l0_points + rfp_points 

    # FC layers
    net = tf_util.conv1d(l0_points, 128, 1, padding='VALID', bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp1')
    net = tf_util.conv1d(net, 128, 1, padding='VALID', bn=True, is_training=is_training, scope='fc2', bn_decay=bn_decay)
    end_points['feats'] = net

    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp2')

    pred = tf_util.conv1d(net, 50, 1, padding='VALID', activation_fn=None, scope='fc3')

    if pp_idx is not None:
        pp_pred = get_pp_pred(end_points['feats'], pp_idx)
    else:
        pp_pred = None

    return pred, pp_pred, end_points
Example #17
def get_instance_seg_v2_net(point_cloud, one_hot_vec,
                            is_training, bn_decay, end_points):
    ''' 3D instance segmentation PointNet v2 network.
    Input:
        point_cloud: TF tensor in shape (B,N,4)
            frustum point clouds with XYZ and intensity in point channels
            XYZs are in frustum coordinate
        one_hot_vec: TF tensor in shape (B,3)
            length-3 vectors indicating predicted object type
        is_training: TF boolean scalar
        bn_decay: TF float scalar
        end_points: dict
    Output:
        logits: TF tensor in shape (B,N,2), scores for bkg/clutter and object
        end_points: dict
    '''

    l0_xyz = tf.slice(point_cloud, [0,0,0], [-1,-1,3])
    l0_points = tf.slice(point_cloud, [0,0,3], [-1,-1,1])

    # Set abstraction layers
    l1_xyz, l1_points = pointnet_sa_module_msg(l0_xyz, l0_points,
        128, [0.2,0.4,0.8], [32,64,128],
        [[32,32,64], [64,64,128], [64 ,96,128]],
        is_training, bn_decay, scope='layer1')
    l2_xyz, l2_points = pointnet_sa_module_msg(l1_xyz, l1_points,
        32, [0.4,0.8,1.6], [64,64,128],
        [[64,64,128], [128,128,256], [128,128,256]],
        is_training, bn_decay, scope='layer2')
    l3_xyz, l3_points, _ = pointnet_sa_module(l2_xyz, l2_points,
        npoint=None, radius=None, nsample=None, mlp=[128,256,1024],
        mlp2=None, group_all=True, is_training=is_training,
        bn_decay=bn_decay, scope='layer3')

    # Feature Propagation layers
    l3_points = tf.concat([l3_points, tf.expand_dims(one_hot_vec, 1)], axis=2)
    l2_points = pointnet_fp_module(l2_xyz, l3_xyz, l2_points, l3_points,
        [128,128], is_training, bn_decay, scope='fa_layer1')
    l1_points = pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points,
        [128,128], is_training, bn_decay, scope='fa_layer2')
    l0_points = pointnet_fp_module(l0_xyz, l1_xyz,
        tf.concat([l0_xyz,l0_points],axis=-1), l1_points,
        [128,128], is_training, bn_decay, scope='fa_layer3')
    end_points['feats'] = l0_points
    # FC layers
    net = tf_util.conv1d(l0_points, 128, 1, padding='VALID', bn=True,
        is_training=is_training, scope='conv1d-fc1', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.7,
        is_training=is_training, scope='dp1')
    logits = tf_util.conv1d(net, 2, 1,
        padding='VALID', activation_fn=None, scope='conv1d-fc2')

    return logits, end_points
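A minimal call sketch matching the shapes documented above; the batch and point counts are illustrative:

import tensorflow as tf  # TF 1.x

B, N = 32, 1024
frustum_pc_pl = tf.placeholder(tf.float32, [B, N, 4])  # XYZ + intensity
one_hot_pl = tf.placeholder(tf.float32, [B, 3])        # predicted object type
is_training_pl = tf.placeholder(tf.bool, shape=())

# logits: B x N x 2 background/object scores per point
logits, end_points = get_instance_seg_v2_net(frustum_pc_pl, one_hot_pl,
                                             is_training_pl, bn_decay=None,
                                             end_points={})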
Example #18
def get_model(point_cloud, is_training, bn_decay=None):
    """ Part segmentation PointNet, input is BxNx3 (XYZ) """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    l0_xyz = tf.slice(point_cloud, [0,0,0], [-1,-1,3])
    l0_points = None

    # Set Abstraction layers
    l1_xyz, l1_points, l1_indices = pointnet_sa_module(l0_xyz, l0_points, npoint=512, radius=0.2, nsample=64, mlp=[64,64,128], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer1')
    l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz, l1_points, npoint=128, radius=0.4, nsample=64, mlp=[128,128,256], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer2')
    l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz, l2_points, npoint=None, radius=None, nsample=None, mlp=[256,512,1024], mlp2=None, group_all=True, is_training=is_training, bn_decay=bn_decay, scope='layer3')

    ########### CLASSIFICATION BRANCH
    net = tf.reshape(l3_points, [batch_size, -1])
    net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp1')
    net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training, scope='fc2', bn_decay=bn_decay)

    # print("Classification feature vector")
    class_vector = tf.expand_dims(net, axis=1)
    # print(class_vector.shape)
    # print()
    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp2')
    class_pred = tf_util.fully_connected(net, NUM_CLASSES, activation_fn=None, scope='fc3')

    ###########SEGMENTATION BRANCH
    # Feature Propagation layers
    l3_points_concat = tf.concat([l3_points, class_vector], axis=2)

    # NOTE: this variant interpolates from class_vector alone; the
    # l3_points_concat path above is kept for reference but unused.
    # l2_points = pointnet_fp_module(l2_xyz, l3_xyz, l2_points, l3_points_concat, [256,256], is_training, bn_decay, scope='fa_layer1')
    l2_points = pointnet_fp_module(l2_xyz, l3_xyz, l2_points, class_vector, [256,256], is_training, bn_decay, scope='fa_layer1')
    l1_points = pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points, [256,128], is_training, bn_decay, scope='fa_layer2')
    l0_points = pointnet_fp_module(l0_xyz, l1_xyz, l0_points, l1_points, [128,128,128], is_training, bn_decay, scope='fa_layer3')

    # FC layers
    net = tf_util.conv1d(l0_points, 128, 1, padding='VALID', bn=True, is_training=is_training, scope='seg_fc1', bn_decay=bn_decay)
    end_points['feats'] = net
    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='seg_dp1')
    seg_pred = tf_util.conv1d(net, 2, 1, padding='VALID', activation_fn=None, scope='seg_fc2')

    return class_pred, seg_pred
def get_instance_seg_v2_net(point_cloud, one_hot_vec,
                            is_training, bn_decay, end_points):
    ''' 3D instance segmentation PointNet v2 network.
    Input:
        point_cloud: TF tensor in shape (B,N,4)
            frustum point clouds with XYZ and intensity in point channels
            XYZs are in frustum coordinate
        one_hot_vec: TF tensor in shape (B,3)
            length-3 vectors indicating predicted object type
        is_training: TF boolean scalar
        bn_decay: TF float scalar
        end_points: dict
    Output:
        logits: TF tensor in shape (B,N,2), scores for bkg/clutter and object
        end_points: dict
    '''

    l0_xyz = tf.slice(point_cloud, [0,0,0], [-1,-1,3])
    l0_points = tf.slice(point_cloud, [0,0,3], [-1,-1,1])

    # Set abstraction layers
    l1_xyz, l1_points = pointnet_sa_module_msg(l0_xyz, l0_points,
        128, [0.2,0.4,0.8], [32,64,128],
        [[32,32,64], [64,64,128], [64,96,128]],
        is_training, bn_decay, scope='layer1')
    l2_xyz, l2_points = pointnet_sa_module_msg(l1_xyz, l1_points,
        32, [0.4,0.8,1.6], [64,64,128],
        [[64,64,128], [128,128,256], [128,128,256]],
        is_training, bn_decay, scope='layer2')
    l3_xyz, l3_points, _ = pointnet_sa_module(l2_xyz, l2_points,
        npoint=None, radius=None, nsample=None, mlp=[128,256,1024],
        mlp2=None, group_all=True, is_training=is_training,
        bn_decay=bn_decay, scope='layer3')

    # Feature Propagation layers
    l3_points = tf.concat([l3_points, tf.expand_dims(one_hot_vec, 1)], axis=2)
    l2_points = pointnet_fp_module(l2_xyz, l3_xyz, l2_points, l3_points,
        [128,128], is_training, bn_decay, scope='fa_layer1')
    l1_points = pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points,
        [128,128], is_training, bn_decay, scope='fa_layer2')
    l0_points = pointnet_fp_module(l0_xyz, l1_xyz,
        tf.concat([l0_xyz,l0_points],axis=-1), l1_points,
        [128,128], is_training, bn_decay, scope='fa_layer3')

    # FC layers
    net = tf_util.conv1d(l0_points, 128, 1, padding='VALID', bn=True,
        is_training=is_training, scope='conv1d-fc1', bn_decay=bn_decay)
    end_points['feats'] = net 
    net = tf_util.dropout(net, keep_prob=0.7,
        is_training=is_training, scope='dp1')
    logits = tf_util.conv1d(net, 2, 1,
        padding='VALID', activation_fn=None, scope='conv1d-fc2')

    return logits, end_points
Example #20
def get_model(point_cloud, is_training, num_class, num_embed=5, sigma=0.05, bn_decay=None,is_dist=False):
    """ Semantic segmentation PointNet, input is BxNx3, output Bxnum_class """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    l0_xyz = point_cloud[:, :, :3]     # [batch_size, num_point, 3] XYZ coordinates
    l0_points = point_cloud[:, :, 3:]  # [batch_size, num_point, C] remaining channels (e.g. RGB and other attributes)
    end_points['l0_xyz'] = l0_xyz

    # shared encoder
    l1_xyz, l1_points, l1_indices = pointnet_sa_module(l0_xyz, l0_points, npoint=1024, radius=0.1, nsample=32, mlp=[32, 32, 64], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, is_dist=is_dist, scope='layer1')
    #l1_xyz [batch_size,1024,3] l1_points [batch_size,1024,64] l1_indices [batch_size,1024,32]
    l2_xyz, l2_points = pointconv_encoding(l1_xyz, l1_points, npoint=256, radius=0.2, sigma=2 * sigma, K=32, mlp=[ 64,  64, 128], is_training=is_training, bn_decay=bn_decay, is_dist=is_dist, weight_decay=None, scope='layer2')
    #l2_xyz [batch_size,256,3] l2_points [batch_size,256,128] 
    l3_xyz, l3_points = pointconv_encoding(l2_xyz, l2_points, npoint=64,  radius=0.4, sigma=4 * sigma, K=32, mlp=[128, 128, 256], is_training=is_training, bn_decay=bn_decay, is_dist=is_dist, weight_decay=None, scope='layer3')
    #l3_xyz [batch_size,64,3] l3_points [batch_size,64,256]
    l4_xyz, l4_points = pointconv_encoding(l3_xyz, l3_points, npoint=32,  radius=0.8, sigma=8 * sigma, K=32, mlp=[256, 256, 512], is_training=is_training, bn_decay=bn_decay, is_dist=is_dist, weight_decay=None, scope='layer4')
    #l4_xyz [batch_size,32,3] l4_points [batch_size,32,512]

    # semantic decoder
    l3_points_sem = pointconv_decoding_depthwise(l3_xyz, l4_xyz, l3_points, l4_points,     radius=0.8, sigma=8*sigma, K=16, mlp=[512, 512], is_training=is_training, bn_decay=bn_decay, is_dist=is_dist, weight_decay=None, scope='sem_fa_layer1')
    #l3_points_sem = [batch_size,64,512]
    l2_points_sem = pointconv_decoding_depthwise(l2_xyz, l3_xyz, l2_points, l3_points_sem, radius=0.4, sigma=4*sigma, K=16, mlp=[256, 256], is_training=is_training, bn_decay=bn_decay, is_dist=is_dist, weight_decay=None, scope='sem_fa_layer2')  
    # batch_size x256x256
    l1_points_sem = pointconv_decoding_depthwise(l1_xyz, l2_xyz, l1_points, l2_points_sem, radius=0.2, sigma=2*sigma, K=16, mlp=[256, 128], is_training=is_training, bn_decay=bn_decay, is_dist=is_dist, weight_decay=None, scope='sem_fa_layer3')  
    # batch_sizex1024x128
    l0_points_sem = pointnet_fp_module(l0_xyz, l1_xyz, l0_points, l1_points_sem, [128, 128, 128], is_training, bn_decay, is_dist=is_dist, scope='sem_fa_layer4')  
    # bx4096x128

    # instance decoder
    l3_points_ins = pointconv_decoding_depthwise(l3_xyz, l4_xyz, l3_points, l4_points,     radius=0.8, sigma=8*sigma, K=16, mlp=[512, 512], is_training=is_training, bn_decay=bn_decay, is_dist=is_dist, weight_decay=None, scope='ins_fa_layer1')
    l2_points_ins = pointconv_decoding_depthwise(l2_xyz, l3_xyz, l2_points, l3_points_ins, radius=0.4, sigma=4*sigma, K=16, mlp=[256, 256], is_training=is_training, bn_decay=bn_decay, is_dist=is_dist, weight_decay=None, scope='ins_fa_layer2')  # 48x256x256
    l1_points_ins = pointconv_decoding_depthwise(l1_xyz, l2_xyz, l1_points, l2_points_ins, radius=0.2, sigma=2*sigma, K=16, mlp=[256, 128], is_training=is_training, bn_decay=bn_decay, is_dist=is_dist, weight_decay=None, scope='ins_fa_layer3')  # 48x1024x128
    l0_points_ins = pointnet_fp_module(l0_xyz, l1_xyz, l0_points, l1_points_ins, [128, 128, 128], is_training, bn_decay, is_dist=is_dist, scope='ins_fa_layer4')   # 48x4096x128   

    # FC layers F_sem
    l2_points_sem_up = pointnet_upsample(l0_xyz, l2_xyz, l2_points_sem, scope='sem_up1')#[b,4096,256]
    l1_points_sem_up = pointnet_upsample(l0_xyz, l1_xyz, l1_points_sem, scope='sem_up2')#[b,4096,128]
    net_sem_0 = tf.add(tf.concat([l0_points_sem, l1_points_sem_up], axis=-1, name='sem_up_concat'), l2_points_sem_up, name='sem_up_add')#[b,4096,256]
    net_sem_0 = tf_util.conv1d(net_sem_0, 128, 1, padding='VALID', bn=True, is_training=is_training, is_dist=is_dist, scope='sem_fc1', bn_decay=bn_decay)
    #[b,4096,128]

    # FC layers F_ins
    l2_points_ins_up = pointnet_upsample(l0_xyz, l2_xyz, l2_points_ins, scope='ins_up1')#[b,4096,256]
    l1_points_ins_up = pointnet_upsample(l0_xyz, l1_xyz, l1_points_ins, scope='ins_up2')#[b,4096,128]
    net_ins_0 = tf.add(tf.concat([l0_points_ins, l1_points_ins_up], axis=-1, name='ins_up_concat'), l2_points_ins_up, name='ins_up_add')#[b,4096,256]
    net_ins_0 = tf_util.conv1d(net_ins_0, 128, 1, padding='VALID', bn=True, is_training=is_training, is_dist=is_dist, scope='ins_fc1', bn_decay=bn_decay)
    #[b,4096,128]

    net_ins_4, net_sem_4 = JSPNet_SIFF_PIFF(net_sem_0, net_ins_0, bn_decay=bn_decay, is_dist=is_dist, is_training=is_training, num_embed=num_embed, num_point=num_point, num_class=num_class)

    return net_sem_4, net_ins_4
Example #21
def get_model(point_cloud, is_training, bn_decay=None):
    """ FlowNet3D, for training
        input: Bx(N1+N2)x3,
        output: BxN1x3 """
    end_points = {}
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value // 2

    l0_xyz_f1 = point_cloud[:, :num_point, 0:3]
    l0_points_f1 = point_cloud[:, :num_point, 3:]
    l0_xyz_f2 = point_cloud[:, num_point:, 0:3]
    l0_points_f2 = point_cloud[:, num_point:, 3:]

    RADIUS1 = 0.5
    RADIUS2 = 1.0
    RADIUS3 = 2.0
    RADIUS4 = 4.0
    with tf.variable_scope('sa1') as scope:
        # Frame 1, Layer 1
        l1_xyz_f1, l1_points_f1, l1_indices_f1 = pointnet_sa_module(l0_xyz_f1, l0_points_f1, npoint=1024, radius=RADIUS1, nsample=16, mlp=[32,32,64], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer1')
        end_points['l1_indices_f1'] = l1_indices_f1

        # Frame 1, Layer 2
        l2_xyz_f1, l2_points_f1, l2_indices_f1 = pointnet_sa_module(l1_xyz_f1, l1_points_f1, npoint=256, radius=RADIUS2, nsample=16, mlp=[64,64,128], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer2')
        end_points['l2_indices_f1'] = l2_indices_f1

        scope.reuse_variables()
        # Frame 2, Layer 1
        l1_xyz_f2, l1_points_f2, l1_indices_f2 = pointnet_sa_module(l0_xyz_f2, l0_points_f2, npoint=1024, radius=RADIUS1, nsample=16, mlp=[32,32,64], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer1')
        # Frame 2, Layer 2
        l2_xyz_f2, l2_points_f2, l2_indices_f2 = pointnet_sa_module(l1_xyz_f2, l1_points_f2, npoint=256, radius=RADIUS2, nsample=16, mlp=[64,64,128], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer2')

    _, l2_points_f1_new = flow_embedding_module(l2_xyz_f1, l2_xyz_f2, l2_points_f1, l2_points_f2, radius=10.0, nsample=64, mlp=[128,128,128], is_training=is_training, bn_decay=bn_decay, scope='flow_embedding', bn=True, pooling='max', knn=True, corr_func='concat')

    # Layer 3
    l3_xyz_f1, l3_points_f1, l3_indices_f1 = pointnet_sa_module(l2_xyz_f1, l2_points_f1_new, npoint=64, radius=RADIUS3, nsample=8, mlp=[128,128,256], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer3')
    end_points['l3_indices_f1'] = l3_indices_f1

    # Layer 4
    l4_xyz_f1, l4_points_f1, l4_indices_f1 = pointnet_sa_module(l3_xyz_f1, l3_points_f1, npoint=16, radius=RADIUS4, nsample=8, mlp=[256,256,512], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer4')
    end_points['l4_indices_f1'] = l4_indices_f1

    # Feature Propagation
    l3_feat_f1 = set_upconv_module(l3_xyz_f1, l4_xyz_f1, l3_points_f1, l4_points_f1, nsample=8, radius=2.4, mlp=[], mlp2=[256,256], scope='up_sa_layer1', is_training=is_training, bn_decay=bn_decay, knn=True)
    l2_feat_f1 = set_upconv_module(l2_xyz_f1, l3_xyz_f1, tf.concat(axis=-1, values=[l2_points_f1, l2_points_f1_new]), l3_feat_f1, nsample=8, radius=1.2, mlp=[128,128,256], mlp2=[256], scope='up_sa_layer2', is_training=is_training, bn_decay=bn_decay, knn=True)
    l1_feat_f1 = set_upconv_module(l1_xyz_f1, l2_xyz_f1, l1_points_f1, l2_feat_f1, nsample=8, radius=0.6, mlp=[128,128,256], mlp2=[256], scope='up_sa_layer3', is_training=is_training, bn_decay=bn_decay, knn=True)
    l0_feat_f1 = pointnet_fp_module(l0_xyz_f1, l1_xyz_f1, l0_points_f1, l1_feat_f1, [256,256], is_training, bn_decay, scope='fa_layer4')

    # FC layers
    net = tf_util.conv1d(l0_feat_f1, 128, 1, padding='VALID', bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
    net = tf_util.conv1d(net, 3, 1, padding='VALID', activation_fn=None, scope='fc2')

    return net, end_points
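A sketch of packing two frames into the single tensor this FlowNet3D graph expects; since the slices at the top of the function read feature channels beyond XYZ, the frames below carry hypothetical 3-channel colors:

import tensorflow as tf  # TF 1.x

B, N = 16, 2048
frame1_pl = tf.placeholder(tf.float32, [B, N, 6])  # XYZ + e.g. RGB per point
frame2_pl = tf.placeholder(tf.float32, [B, N, 6])
is_training_pl = tf.placeholder(tf.bool, shape=())

# The model splits this back into the two frames internally.
point_cloud_pl = tf.concat([frame1_pl, frame2_pl], axis=1)  # B x 2N x 6

# net: B x N x 3 predicted scene flow for frame 1
net, end_points = get_model(point_cloud_pl, is_training_pl, bn_decay=None)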
def get_model(point_cloud, is_training, num_class, bn_decay=None, weight_decay=None, feature_channel=0):
    """ Semantic segmentation PointNet, input is B x N x3 , output B x num_class """
    end_points = {}
    num_point = point_cloud.get_shape()[1].value
    if feature_channel > 0:
        l0_xyz = tf.slice(point_cloud, [0, 0, 0], [-1, -1, 3])
        l0_points = tf.slice(point_cloud, [0, 0, 3], [-1, -1, feature_channel])
    else:
        l0_xyz = point_cloud
        l0_points = point_cloud

    end_points['l0_xyz'] = l0_xyz
    num_points = [num_point//8, num_point//32, num_point//128, num_point//256]
    _, l0_points = PointASNLSetAbstraction(l0_xyz, l0_points, npoint=num_point, nsample=32, mlp=[16,16,32], is_training=is_training, bn_decay=bn_decay, weight_decay=weight_decay, scope='layer0', as_neighbor=0, NL=False)

    # 1st Res Layer
    l1_xyz, l1_1_points = PointASNLSetAbstraction(l0_xyz, l0_points, npoint=num_points[0], nsample=32, mlp=[32,32,64], is_training=is_training, bn_decay=bn_decay, weight_decay=weight_decay, scope='layer1_1', as_neighbor=8)
    _, l1_2_points = PointASNLSetAbstraction(l0_xyz, l0_points, npoint=num_points[0], nsample=32, mlp=[64,64], is_training=is_training, bn_decay=bn_decay, weight_decay=weight_decay, scope='layer1_2', as_neighbor=0, NL=False)
    l1_2_points += l1_1_points

    # 2nd Res Layer
    l2_xyz, l2_1_points = PointASNLSetAbstraction(l1_xyz, l1_2_points, npoint=num_points[1], nsample=32, mlp=[64,64,128], is_training=is_training, bn_decay=bn_decay, weight_decay=weight_decay,scope='layer2_1', as_neighbor=4)
    _, l2_2_points = PointASNLSetAbstraction(l2_xyz, l2_1_points, npoint=num_points[1], nsample=32, mlp=[128,128], is_training=is_training, bn_decay=bn_decay, weight_decay=weight_decay,scope='layer2_2', as_neighbor=0, NL=False)
    l2_2_points += l2_1_points

    # 3rd Res Layer
    l3_xyz, l3_1_points = PointASNLSetAbstraction(l2_xyz, l2_2_points, npoint=num_points[2], nsample=32, mlp=[128,128,256], is_training=is_training, bn_decay=bn_decay, weight_decay=weight_decay, scope='layer3_1', as_neighbor=0)
    _, l3_2_points = PointASNLSetAbstraction(l3_xyz, l3_1_points, npoint=num_points[2], nsample=32, mlp=[256,256], is_training=is_training, bn_decay=bn_decay, weight_decay=weight_decay, scope='layer3_2', as_neighbor=0, NL=False)
    l3_2_points += l3_1_points

    # 4th Res Layer
    l4_xyz, l4_1_points = PointASNLSetAbstraction(l3_xyz, l3_2_points, npoint=num_points[3], nsample=32, mlp=[256,256,512], is_training=is_training, bn_decay=bn_decay, weight_decay=weight_decay, scope='layer4_1', as_neighbor=0)  # feeds the residual output l3_2_points, consistent with the other stages
    _, l4_2_points = PointASNLSetAbstraction(l4_xyz, l4_1_points, npoint=num_points[3], nsample=32, mlp=[512,512], is_training=is_training, bn_decay=bn_decay, weight_decay=weight_decay, scope='layer4_2', as_neighbor=0, NL=False)
    l4_2_points += l4_1_points

    end_points['l1_xyz'] = l1_xyz

    # Feature decoding layers
    l3_points = pointnet_fp_module(l3_xyz, l4_xyz, l3_2_points, l4_2_points, [512,512], is_training, bn_decay, scope='fa_layer1', bn=True)
    l2_points = pointnet_fp_module(l2_xyz, l3_xyz, l2_2_points, l3_points, [256,256], is_training, bn_decay, scope='fa_layer2', bn=True)
    l1_points = pointnet_fp_module(l1_xyz, l2_xyz, l1_2_points, l2_points, [256,128], is_training, bn_decay, scope='fa_layer3', bn=True)
    l0_points = pointnet_fp_module(l0_xyz, l1_xyz, l0_points, l1_points, [128,128,128], is_training, bn_decay, scope='fa_layer4', bn=True)

    # FC layers
    net = tf_util.conv1d(l0_points, 128, 1, padding='VALID', activation_fn=tf.nn.leaky_relu, bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay, weight_decay=weight_decay)
    end_points['feats'] = net
    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp')
    net = tf_util.conv1d(net, num_class, 1, padding='VALID', activation_fn=None, weight_decay=weight_decay, scope='fc0')

    return net, end_points
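# Minimal usage sketch for the PointASNL variant above (batch size, point count
# and class count are illustrative):
pc_pl = tf.placeholder(tf.float32, shape=(4, 8192, 3))      # XYZ only
is_training_pl = tf.placeholder(tf.bool, shape=())
logits, ep = get_model(pc_pl, is_training_pl, num_class=13,
                       bn_decay=0.9, weight_decay=1e-4, feature_channel=0)
# logits: 4 x 8192 x 13 per-point class scores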
def get_model(point_cloud, is_training, num_class, bn_decay=None):
    """ Semantic segmentation PointNet, input is BxNx3, output Bxnum_class """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    l0_xyz = point_cloud[:, :, :3]
    l0_points = point_cloud[:, :, 3:]
    end_points['l0_xyz'] = l0_xyz

    # Layer 1
    l1_xyz, l1_points, l1_indices = pointnet_sa_module(l0_xyz, l0_points, npoint=1024, radius=0.1, nsample=32, mlp=[32,32,64], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer1')
    l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz, l1_points, npoint=256, radius=0.2, nsample=32, mlp=[64,64,128], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer2')
    l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz, l2_points, npoint=64, radius=0.4, nsample=32, mlp=[128,128,256], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer3')
    l4_xyz, l4_points, l4_indices = pointnet_sa_module(l3_xyz, l3_points, npoint=16, radius=0.8, nsample=32, mlp=[256,256,512], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer4')

    # Feature Propagation layers
    l3_points_sem = pointnet_fp_module(l3_xyz, l4_xyz, l3_points, l4_points, [256,256], is_training, bn_decay, scope='sem_fa_layer1')
    l2_points_sem = pointnet_fp_module(l2_xyz, l3_xyz, l2_points, l3_points_sem, [256,256], is_training, bn_decay, scope='sem_fa_layer2')
    l1_points_sem = pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points_sem, [256,128], is_training, bn_decay, scope='sem_fa_layer3')
    l0_points_sem = pointnet_fp_module(l0_xyz, l1_xyz, l0_points, l1_points_sem, [128,128,128], is_training, bn_decay, scope='sem_fa_layer4')

    # FC layers
    net_sem = tf_util.conv1d(l0_points_sem, 128, 1, padding='VALID', bn=True, is_training=is_training, scope='sem_fc1', bn_decay=bn_decay)
    net_sem_cache = tf_util.conv1d(net_sem, 128, 1, padding='VALID', bn=True, is_training=is_training, scope='sem_cache', bn_decay=bn_decay)

    # ins
    l3_points_ins = pointnet_fp_module(l3_xyz, l4_xyz, l3_points, l4_points, [256,256], is_training, bn_decay, scope='ins_fa_layer1')
    l2_points_ins = pointnet_fp_module(l2_xyz, l3_xyz, l2_points, l3_points_ins, [256,256], is_training, bn_decay, scope='ins_fa_layer2')
    l1_points_ins = pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points_ins, [256,128], is_training, bn_decay, scope='ins_fa_layer3')
    l0_points_ins = pointnet_fp_module(l0_xyz, l1_xyz, l0_points, l1_points_ins, [128,128,128], is_training, bn_decay, scope='ins_fa_layer4')

    net_ins = tf_util.conv1d(l0_points_ins, 128, 1, padding='VALID', bn=True, is_training=is_training, scope='ins_fc1', bn_decay=bn_decay)

    net_ins = net_ins + net_sem_cache
    net_ins = tf_util.dropout(net_ins, keep_prob=0.5, is_training=is_training, scope='ins_dp1')
    net_ins = tf_util.conv1d(net_ins, 5, 1, padding='VALID', activation_fn=None, scope='ins_fc4')

    k = 40
    adj_matrix = tf_util.pairwise_distance_l1(net_ins)
    nn_idx = tf_util.knn_thres(adj_matrix, k=k)
    nn_idx = tf.stop_gradient(nn_idx)

    net_sem = tf_util.get_local_feature(net_sem, nn_idx=nn_idx, k=k)  # [b, n, k, c]
    net_sem = tf.reduce_max(net_sem, axis=-2, keepdims=False)

    net_sem = tf_util.dropout(net_sem, keep_prob=0.5, is_training=is_training, scope='sem_dp1')
    net_sem = tf_util.conv1d(net_sem, num_class, 1, padding='VALID', activation_fn=None, scope='sem_fc4')

    
    return net_sem, net_ins
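# tf_util.pairwise_distance_l1 is a project helper; a plain-TF sketch of what it
# is assumed to compute (a BxNxN matrix of L1 distances between the per-point
# instance embeddings, used above for the kNN feature aggregation):
def pairwise_distance_l1_sketch(feats):
    # feats: B x N x C  ->  diff: B x N x N x C  ->  dist: B x N x N
    diff = tf.expand_dims(feats, 2) - tf.expand_dims(feats, 1)
    return tf.reduce_sum(tf.abs(diff), axis=-1)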
Example #24
def pointResNet(points,
                mlp,
                is_training,
                bn_decay,
                scope,
                bn=True,
                use_nchw=False):
    '''
    Input:
        points: BxNxC
        mlp: list of num_out_channel
    Return:
        conv_final: BxNxmlp[-1]
    '''
    data_format = 'NCHW' if use_nchw else 'NHWC'  # note: unused by the 1-D convs below
    conv = points
    with tf.variable_scope(scope) as sc:
        for j, num_out_channel in enumerate(mlp):
            if j == 0:
                conv = tf_util.conv1d(conv,
                                      num_out_channel,
                                      1,
                                      padding='VALID',
                                      bn=bn,
                                      is_training=is_training,
                                      scope='conv1d_%d' % (j),
                                      bn_decay=bn_decay)
                conv0 = conv
                conv_final = conv  # fallback so a single-entry mlp still returns a value
            elif j == len(mlp) - 1:
                conv_final = tf_util.conv1d(tf.concat([conv0, conv], 2),
                                            num_out_channel,
                                            1,
                                            padding='VALID',
                                            bn=bn,
                                            is_training=is_training,
                                            scope='conv1d_%d' % (j),
                                            bn_decay=bn_decay)
            else:
                conv = tf_util.conv1d(conv,
                                      num_out_channel,
                                      1,
                                      padding='VALID',
                                      bn=bn,
                                      is_training=is_training,
                                      scope='conv1d_%d' % (j),
                                      bn_decay=bn_decay)
        return conv_final
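# Usage sketch for pointResNet (shapes illustrative): with mlp=[64, 64, 128],
# the final conv consumes tf.concat([conv0, conv], 2) -- 64 + 64 = 128 input
# channels -- before projecting to 128:
feats_pl = tf.placeholder(tf.float32, shape=(2, 1024, 32))
out = pointResNet(feats_pl, mlp=[64, 64, 128], is_training=tf.constant(True),
                  bn_decay=None, scope='pres_demo')
# out: 2 x 1024 x 128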
Example #25
def DiscreteConv(grouped_points, mlp_list, bn, i, is_training, bn_decay,
                 weight, nk, kernel_fit):
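    # Assumed shapes (reconstructed from the ops below; nk = number of kernel
    # bins, kernel_fit is unused here):
    #   grouped_points: B x npoint x nsample x C
    #   weight: broadcastable to B x npoint x nsample x C x nk soft assignments
    # The weighted sum over nsample gives B x npoint x C x nk; the transpose
    # and the [1, nk] VALID conv then collapse the nk bins per point.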
    grouped_points = tf.reduce_sum(tf.expand_dims(grouped_points, 4) * weight,
                                   axis=2)
    grouped_points = tf.transpose(grouped_points, [0, 1, 3, 2])

    grouped_points = tf_util.conv2d(grouped_points,
                                    mlp_list[i][1], [1, nk],
                                    padding='VALID',
                                    stride=[1, 1],
                                    bn=bn,
                                    is_training=is_training,
                                    scope='conv%d_%d' % (i, 1),
                                    bn_decay=bn_decay)
    new_points = tf.squeeze(grouped_points, axis=2)
    new_points = tf_util.conv1d(new_points,
                                mlp_list[i][2],
                                1,
                                padding='VALID',
                                stride=1,
                                bn=bn,
                                is_training=is_training,
                                scope='conv%d_%d' % (i, 2),
                                bn_decay=bn_decay)
    return new_points
def get_model(point_cloud, num_frames, is_training, bn_decay=None):
    """ Semantic segmentation PointNet, input is BxNx3, output Bxnum_class """
    end_points = {}
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value // num_frames

    l0_xyz = point_cloud[:, :, 0:3]
    l0_time = tf.concat([tf.ones([batch_size, num_point, 1]) * i for i in range(num_frames)], \
            axis=-2)
    l0_points = tf.concat([point_cloud[:, :, 3:], l0_time], axis=-1)

    RADIUS1 = np.array([0.98, 0.99, 1.0], dtype='float32')
    RADIUS2 = RADIUS1 * 2
    RADIUS3 = RADIUS1 * 4
    RADIUS4 = RADIUS1 * 8

    l1_xyz, l1_time, l1_points, l1_indices = meteor_direct_module(l0_xyz, l0_time, l0_points, npoint=2048, radius=RADIUS1, nsample=32, mlp=[32,32,128], mlp2=None, group_all=False, knn=False, is_training=is_training, bn_decay=bn_decay, scope='layer1')
    l2_xyz, l2_time, l2_points, l2_indices = meteor_direct_module(l1_xyz, l1_time, l1_points, npoint=512, radius=RADIUS2, nsample=32, mlp=[64,64,256], mlp2=None, group_all=False, knn=False, is_training=is_training, bn_decay=bn_decay, scope='layer2')
    l3_xyz, l3_time, l3_points, l3_indices = meteor_direct_module(l2_xyz, l2_time, l2_points, npoint=128, radius=RADIUS3, nsample=32, mlp=[128,128,512], mlp2=None, group_all=False, knn=False, is_training=is_training, bn_decay=bn_decay, scope='layer3')
    l4_xyz, l4_time, l4_points, l4_indices = meteor_direct_module(l3_xyz, l3_time, l3_points, npoint=64, radius=RADIUS4, nsample=32, mlp=[256,256,1024], mlp2=None, group_all=False, knn=False, is_training=is_training, bn_decay=bn_decay, scope='layer4')

    # Feature Propagation layers
    l3_points = pointnet_fp_module(l3_xyz, l4_xyz, l3_points, l4_points, [256,256], is_training, bn_decay, scope='fa_layer1')
    l2_points = pointnet_fp_module(l2_xyz, l3_xyz, l2_points, l3_points, [256,256], is_training, bn_decay, scope='fa_layer2')
    l1_points = pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points, [256,128], is_training, bn_decay, scope='fa_layer3')
    l0_points = pointnet_fp_module(l0_xyz, l1_xyz, l0_points, l1_points, [128,128], is_training, bn_decay, scope='fa_layer4')

    # FC layer
    net = tf_util.conv1d(l0_points, 12, 1, padding='VALID', activation_fn=None, scope='fc2')

    return net, end_points
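# Usage sketch (illustrative shapes): frames are stacked along the point axis,
# so two 4096-point frames give a B x 8192 x 3 input; a per-point frame index
# is appended as the time feature inside the model:
pc_pl = tf.placeholder(tf.float32, shape=(8, 8192, 3))
pred, ep = get_model(pc_pl, num_frames=2, is_training=tf.constant(False))
# pred: 8 x 8192 x 12 per-point logits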
Example #27
def encoder_with_convs_and_symmetry(in_layer,
                                    n_filters,
                                    filter_sizes,
                                    scope=None,
                                    verbose=False):
    # Student's TODO
    if verbose:
        print('Building Encoder')

    layer = in_layer
    for i in range(n_filters):
        name = 'encoder_' + str(i)
        scope_i = expand_scope_by_name(scope, name)
        layer = conv1d(layer, filter_sizes[i], kernel_size=1, scope=scope_i)

        if i == 2:
            keep_layer = layer  # per-point features kept for the skip path (assumes n_filters >= 3)

    # symmetric max-pool over the point axis gives the global latent code
    latent_vec = tf.reduce_max(layer, axis=1)

    n_pc_per_model = keep_layer.get_shape()[1].value
    dim_latent_vec = latent_vec.get_shape()[1].value

    latent_vec_repeated = tf.reshape(tf.tile(latent_vec, [1, n_pc_per_model]),
                                     [-1, n_pc_per_model, dim_latent_vec])
    combo_latent_pt = tf.concat([keep_layer, latent_vec_repeated], axis=-1)

    return latent_vec, combo_latent_pt
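# Usage sketch (hypothetical shapes), assuming n_filters >= 3 so the third
# layer's output is available for the skip connection:
pts_pl = tf.placeholder(tf.float32, shape=(16, 1024, 3))
latent, combo = encoder_with_convs_and_symmetry(pts_pl, n_filters=4,
                                                filter_sizes=[64, 128, 128, 256])
# latent: 16 x 256 global code; combo: 16 x 1024 x (128 + 256)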
def get_pp_pred_2(feats, pp_idx, is_training, bn_decay):
    batch_size = pp_idx.get_shape()[0].value
    num_point_pairs = pp_idx.get_shape()[1].value

    # gather point-pairs
    batch_idx = np.arange(batch_size)
    batch_idx = np.reshape(batch_idx, (batch_size, 1))
    batch_idx = np.tile(batch_idx, 2 * num_point_pairs)
    batch_idx = np.reshape(batch_idx, (batch_size, num_point_pairs, 2))
    pp_idx = tf.stack([batch_idx, pp_idx], axis=-1)
    pp_feats = tf.gather_nd(feats,
                            pp_idx)  # (batch_size,num_point_pairs,2,128)

    # predict similarity labels of the point-pairs

    pp_pred = tf_util.conv2d(pp_feats,
                             128, [1, 2],
                             padding='VALID',
                             stride=[1, 1],
                             bn=True,
                             is_training=is_training,
                             scope='pp_fc1',
                             bn_decay=bn_decay,
                             data_format='NHWC')

    pp_pred = tf.squeeze(pp_pred, axis=[2])
    pp_pred = tf_util.conv1d(pp_pred,
                             1,
                             1,
                             padding='VALID',
                             activation_fn=None,
                             scope='pp_fc2')
    pp_pred = tf.squeeze(pp_pred, axis=[-1])  # (batch_size,num_point_pairs)

    return pp_pred
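# Usage sketch (illustrative shapes): score 512 point pairs per cloud from
# 128-d per-point features; pp_idx holds the two point indices of each pair:
feats_pl = tf.placeholder(tf.float32, shape=(4, 2048, 128))
pp_idx_pl = tf.placeholder(tf.int32, shape=(4, 512, 2))
pp_logits = get_pp_pred_2(feats_pl, pp_idx_pl, is_training=tf.constant(True),
                          bn_decay=None)
# pp_logits: 4 x 512 similarity logits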
Example #29
def pointNet(points,
             mlp,
             is_training,
             bn_decay,
             scope,
             bn=True,
             use_nchw=False):
    '''
    Input:
        points: BxNxC
        mlp: list of num_out_channel
    Return:
        conv: BxNxmlp[-1]
    '''
    data_format = 'NCHW' if use_nchw else 'NHWC'  # note: unused by the 1-D convs below
    conv = points
    with tf.variable_scope(scope) as sc:
        for j, num_out_channel in enumerate(mlp):
            conv = tf_util.conv1d(conv,
                                  num_out_channel,
                                  1,
                                  padding='VALID',
                                  bn=bn,
                                  is_training=is_training,
                                  scope='conv1d_%d' % (j),
                                  bn_decay=bn_decay)

        return conv
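# Usage sketch: pointNet is a plain shared MLP over points, e.g. lifting
# B x N x 3 inputs to B x N x 128 (shapes illustrative):
out = pointNet(tf.placeholder(tf.float32, shape=(2, 1024, 3)),
               mlp=[64, 128], is_training=tf.constant(True),
               bn_decay=None, scope='pnet_demo')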
Example #30
def get_displacements(input_points, ske_features, FLAGS, is_training = False, bn_decay=None):
    """ Semantic segmentation PointNet, input is BxNx3, output Bxnum_class """

    batch_size = FLAGS.batch_size
    num_points = FLAGS.point_num_out

    point_cloud = input_points

    l0_xyz = point_cloud
    l0_points = None

    # Set Abstraction layers: the first layer samples 1024 points from the 2048 inputs
    l1_xyz, l1_points, l1_indices = pointnet_sa_module(l0_xyz, l0_points, npoint=1024, radius=0.1 * FLAGS.radiusScal, nsample=64,
                                                       mlp=[64, 64, 128], mlp2=None, group_all=False,
                                                       is_training=is_training, bn_decay=bn_decay, scope='layer1')  # the scope argument acts as a variable-name prefix
    l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz, l1_points, npoint=384, radius=0.2* FLAGS.radiusScal, nsample=64,
                                                       mlp=[128, 128, 256], mlp2=None, group_all=False,
                                                       is_training=is_training, bn_decay=bn_decay, scope='layer2')
    l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz, l2_points, npoint=128, radius=0.4* FLAGS.radiusScal, nsample=64,
                                                       mlp=[256, 256, 512], mlp2=None, group_all=False,
                                                       is_training=is_training, bn_decay=bn_decay, scope='layer3')

    # PointNet
    l4_xyz, l4_points, l4_indices = pointnet_sa_module(l3_xyz, l3_points, npoint=None, radius=None, nsample=None,
                                                       mlp=[512, 512, 1024], mlp2=None, group_all=True,
                                                       is_training=is_training, bn_decay=bn_decay, scope='layer4')

    # Feature Propagation layers: feature maps are interpolated according to point coordinates
    # interpolate l3 features from the l4 level
    l3_points = pointnet_fp_module(l3_xyz, l4_xyz, l3_points, l4_points, [512, 512], is_training, bn_decay, scope='fa_layer1')
    l2_points = pointnet_fp_module(l2_xyz, l3_xyz, l2_points, l3_points, [512, 256], is_training, bn_decay, scope='fa_layer2')
    l1_points = pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points, [256, 128], is_training, bn_decay, scope='fa_layer3')
    l0_points = pointnet_fp_module(l0_xyz, l1_xyz, l0_points, l1_points, [128, 128, 128], is_training, bn_decay, scope='fa_layer4')

    # Append the extracted skeleton features to every point
    # ske_features: batch_size x feature_dim
    ske_features = tf.tile(tf.expand_dims(ske_features, 1), [1, num_points, 1])
    l0_points = tf.concat([l0_points, ske_features], axis=-1)
    # Map the per-point features to displacements
    net = tf_util.conv1d(l0_points, 128, 1, padding='VALID', bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay )
    net = tf_util.conv1d(net, 64, 1, padding='VALID', bn=True, is_training=is_training, scope='fc2', bn_decay=bn_decay)
    net = tf_util.conv1d(net, 3, 1, padding='VALID', activation_fn=None, scope='fc3')

    displacements = tf.sigmoid(net) * FLAGS.range_max * 2 - FLAGS.range_max  # squash into (-range_max, range_max)

    return displacements
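# Numeric check of the final mapping (assuming FLAGS.range_max = 0.5): the
# sigmoid squashes logits into (0, 1), which the affine map rescales to
# (-range_max, +range_max):
import numpy as np

def displacement_range_sketch(logit, range_max=0.5):
    return 1.0 / (1.0 + np.exp(-logit)) * range_max * 2 - range_max

assert displacement_range_sketch(0.0) == 0.0   # zero logit -> zero displacement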
Example #31
def get_model(point_cloud, is_training, bn=True, bn_decay=None):
    end_points = {}

    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value

    l0_xyz = point_cloud
    l0_points = point_cloud

    # Set Abstraction layers
    l1_xyz, l1_points, l1_indices = pointnet_sa_module(l0_xyz, l0_points, npoint=512, radius=0.2, nsample=64, mlp=[64,64,128], mlp2=None, group_all=False, \
            bn=bn, is_training=is_training, bn_decay=bn_decay, scope='layer1')
    l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz, l1_points, npoint=128, radius=0.4, nsample=64, mlp=[128,128,256], mlp2=None, group_all=False, \
            bn=bn, is_training=is_training, bn_decay=bn_decay, scope='layer2')
    l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz, l2_points, npoint=None, radius=None, nsample=None, mlp=[256,512,1024], mlp2=None, group_all=True, \
            bn=bn, is_training=is_training, bn_decay=bn_decay, scope='layer3')

    # Feature Propagation layers
    l2_points = pointnet_fp_module(l2_xyz, l3_xyz, l2_points, l3_points, [256,256], \
            bn=bn, is_training=is_training, bn_decay=bn_decay, scope='fa_layer1')
    l1_points = pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points, [256,128], \
            bn=bn, is_training=is_training, bn_decay=bn_decay, scope='fa_layer2')
    l0_points = pointnet_fp_module(l0_xyz, l1_xyz, tf.concat([l0_xyz,l0_points],axis=-1), l1_points, [128,128,128], \
            bn=bn, is_training=is_training, bn_decay=bn_decay, scope='fa_layer3')

    # semantic segmentation branch
    seg_net = l0_points
    seg_net = tf_util.conv1d(seg_net,
                             256,
                             1,
                             padding='VALID',
                             bn=bn,
                             is_training=is_training,
                             scope='seg/fc1',
                             bn_decay=bn_decay)
    seg_net = tf_util.conv1d(seg_net,
                             256,
                             1,
                             padding='VALID',
                             bn=bn,
                             is_training=is_training,
                             scope='seg/fc2',
                             bn_decay=bn_decay)
    seg_net = tf.expand_dims(seg_net, axis=2)
    print('PointNet++ Output: ', seg_net)
    return seg_net
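# Usage sketch (illustrative shapes): the branch returns B x N x 1 x 256
# features, expanded so a 2-D conv head can consume them downstream:
pc_pl = tf.placeholder(tf.float32, shape=(8, 1024, 3))
seg_feats = get_model(pc_pl, is_training=tf.constant(True))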
Example #32
def build_pointnet2_seg(scope, X, out_dims, is_training, bn_decay):
    with tf.variable_scope(scope):
        l0_xyz = tf.slice(X, [0,0,0], [-1,-1,3])
        l0_points = tf.slice(X, [0,0,3], [-1,-1,0])  # zero-width slice: no extra features beyond XYZ

        # Set Abstraction layers
        l1_xyz, l1_points, l1_indices = pointnet_sa_module(l0_xyz, l0_points,
                npoint=512, radius=0.2, nsample=64, mlp=[64,64,128],
                mlp2=None, group_all=False, is_training=is_training,
                bn_decay=bn_decay, scope='layer1')

        l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz, l1_points,
                npoint=128, radius=0.4, nsample=64, mlp=[128,128,256],
                mlp2=None, group_all=False, is_training=is_training,
                bn_decay=bn_decay, scope='layer2')

        l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz, l2_points,
                npoint=None, radius=None, nsample=None, mlp=[256,512,1024],
                mlp2=None, group_all=True, is_training=is_training,
                bn_decay=bn_decay, scope='layer3')

        # Feature Propagation layers
        l2_points = pointnet_fp_module(l2_xyz, l3_xyz, l2_points, l3_points,
                [256,256], is_training, bn_decay, scope='fa_layer1')

        l1_points = pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points,
                [256,128], is_training, bn_decay, scope='fa_layer2')

        l0_points = pointnet_fp_module(l0_xyz, l1_xyz,
                tf.concat([l0_xyz,l0_points],axis=-1), l1_points, [128,128,128],
                is_training, bn_decay, scope='fa_layer3')

        # FC layers
        net = tf_util.conv1d(l0_points, 128, 1, padding='VALID', bn=True,
                is_training=is_training, scope='fc1', bn_decay=bn_decay)

        net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training,
                scope='dp1')

        results = []
        for idx, out_dim in enumerate(out_dims):
            current_result = tf_util.conv1d(net, out_dim, 1, padding='VALID', activation_fn=None, scope='fc2_{}'.format(idx))
            results.append(current_result)

        return results
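# Usage sketch: out_dims defines one 1x1-conv head per entry on the shared
# backbone features (shapes and head widths illustrative):
X_pl = tf.placeholder(tf.float32, shape=(4, 2048, 3))
heads = build_pointnet2_seg('seg_demo', X_pl, out_dims=[50, 3],
                            is_training=tf.constant(True), bn_decay=None)
# heads[0]: 4 x 2048 x 50; heads[1]: 4 x 2048 x 3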
Example #33
def build_pointnet2_seg(X, out_dim, is_training, bn_decay, scope):
    n_points = X.get_shape()[1].value

    l0_xyz = tf.slice(X, [0,0,0], [-1,-1,3])
    l0_points = tf.slice(X, [0,0,3], [-1,-1,0])  # zero-width slice: no extra features beyond XYZ

    # Set Abstraction layers
    l1_xyz, l1_points, l1_indices = pointnet_sa_module(l0_xyz, l0_points,
            npoint=512, radius=0.2, nsample=64, mlp=[64,64,128],
            mlp2=None, group_all=False, is_training=is_training,
            bn_decay=bn_decay, scope='layer1')

    l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz, l1_points,
            npoint=128, radius=0.4, nsample=64, mlp=[128,128,256],
            mlp2=None, group_all=False, is_training=is_training,
            bn_decay=bn_decay, scope='layer2')

    l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz, l2_points,
            npoint=None, radius=None, nsample=None, mlp=[256,512,1024],
            mlp2=None, group_all=True, is_training=is_training,
            bn_decay=bn_decay, scope='layer3')

    # Feature Propagation layers
    l2_points = pointnet_fp_module(l2_xyz, l3_xyz, l2_points, l3_points,
            [256,256], is_training, bn_decay, scope='fa_layer1')

    l1_points = pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points,
            [256,128], is_training, bn_decay, scope='fa_layer2')

    l0_points = pointnet_fp_module(l0_xyz, l1_xyz,
            tf.concat([l0_xyz,l0_points],axis=-1), l1_points, [128,128,128],
            is_training, bn_decay, scope='fa_layer3')

    # FC layers
    net = tf_util.conv1d(l0_points, 128, 1, padding='VALID', bn=True,
            is_training=is_training, scope='fc1', bn_decay=bn_decay)

    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training,
            scope='dp1')

    net = tf_util.conv1d(net, out_dim, 1, padding='VALID', activation_fn=None,
            scope='fc2')

    return net, 0