Example #1
def ssc_color_info_abstraction(input,
                               mlp_list,
                               is_training,
                               bn_decay,
                               scope,
                               kernel_size,
                               bn=True):

    kernel_h = kernel_size[0]
    kernel_w = kernel_size[1]
    channel = input.get_shape()[-1].value
    with tf.variable_scope(scope) as sc:
        each_layer = basic_tf.conv2d(input,
                                     channel, [1, 1],
                                     padding='SAME',
                                     stride=[1, 1],
                                     bn=bn,
                                     is_training=is_training,
                                     scope='conv_p0',
                                     bn_decay=bn_decay)

        for i, num_out_channel in enumerate(mlp_list):
            print('conv', i)
            input = basic_tf.conv2d(input,
                                    mlp_list[i] // 2, [1, 1],
                                    padding='SAME',
                                    stride=[1, 1],
                                    bn=bn,
                                    is_training=is_training,
                                    scope='conv_p1_%d' % (i),
                                    bn_decay=bn_decay)
            input = basic_tf.conv2d(input,
                                    mlp_list[i], [kernel_h, kernel_w],
                                    padding='SAME',
                                    stride=[1, 1],
                                    bn=bn,
                                    is_training=is_training,
                                    scope='conv_p2_%d' % (i),
                                    bn_decay=bn_decay)

            each_layer = tf.concat(
                [each_layer, input],
                -1)  # depth-concatenated hypercolumn: (b, h1, w1, channel + sum(mlp_list))

        # pooling, kernel size [2, 2], stride 1 (spatial size preserved)

        each_layer = basic_tf.max_pool2d(each_layer,
                                         kernel_size=[2, 2],
                                         stride=[1, 1],
                                         padding='SAME',
                                         scope='maxpool1')

        # (b, h1, w1, channel + sum(mlp_list))
    print('one turn finished')
    return each_layer
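A minimal usage sketch (hypothetical shapes and scope name; TF 1.x and the basic_tf module assumed importable): the block grows channels to input_channels + sum(mlp_list) while the stride-1 pooling keeps the spatial size.

feats = tf.placeholder(tf.float32, (8, 64, 64, 96))
training_flag = tf.placeholder(tf.bool, shape=())
block = ssc_color_info_abstraction(feats, [64, 128], training_flag,
                                   bn_decay=None, scope='ssc_demo',
                                   kernel_size=[3, 3])  # (8, 64, 64, 96 + 64 + 128)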
Example #2
def basic_detectModel(img, is_training, bn_decay, cell_size, num_class,
                      box_num):  # 512x512 input -> 4x4 output grid

    with tf.variable_scope('conv_unit1'):

        out = basic_tf.conv2d(img, 16, [3, 3], 'conv_11', [1, 1], 'SAME')
        out = basic_tf.max_pool2d(out, [2, 2], 'maxpool_11', [2, 2], 'SAME')

        out = basic_tf.conv2d(out, 32, [3, 3], 'conv_12', [1, 1], 'SAME')
        out = basic_tf.max_pool2d(out, [2, 2], 'maxpool_12', [2, 2], 'SAME')

        out = basic_tf.conv2d(out, 64, [3, 3], 'conv_13', [1, 1], 'SAME')
        out = basic_tf.max_pool2d(out, [2, 2], 'maxpool_13', [2, 2], 'SAME')

        out = basic_tf.conv2d(out, 128, [3, 3], 'conv_14', [1, 1], 'SAME')
        out = basic_tf.max_pool2d(out, [2, 2], 'maxpool_14', [2, 2], 'SAME')

        out = basic_tf.conv2d(out, 256, [3, 3], 'conv_15', [1, 1], 'SAME')
        out = basic_tf.max_pool2d(out, [2, 2], 'maxpool_15', [2, 2], 'SAME')

        out = basic_tf.conv2d(out, 512, [3, 3], 'conv_16', [1, 1], 'SAME')
        out = basic_tf.max_pool2d(out, [2, 2], 'maxpool_16', [2, 2], 'SAME')

        out = basic_tf.max_pool2d(out, [2, 2], 'maxpool_17', [2, 2], 'SAME')
    with tf.variable_scope('conv_unit2'):

        out1 = basic_tf.conv2d(out, 1024, [3, 3], 'conv_21', [1, 1], 'SAME')
        out1 = basic_tf.conv2d(out1, 512, [1, 1], 'conv_22', [1, 1], 'SAME')

    with tf.variable_scope('fully_connected_unit'):

        out2 = tf.reshape(out1, (int(out1.get_shape()[0]), -1))  # (b, 8192)
        out2 = basic_tf.fully_connected(out2, 4096, 'fc1')
        out2 = basic_tf.dropout(out2, is_training, 'dp1', 0.5)

        out2 = basic_tf.fully_connected(out2, 1024, 'fc2')
        out2 = basic_tf.dropout(out2, is_training, 'dp2', 0.5)

        out2 = basic_tf.fully_connected(
            out2, cell_size[0] * cell_size[1] * (num_class + box_num * 5),
            'fc3')

    with tf.variable_scope('output_unit'):
        n1 = cell_size[0] * cell_size[1] * num_class  #4*4*2=32
        n2 = n1 + cell_size[0] * cell_size[1] * box_num  #32+4*4*2=64

        class_pred = tf.reshape(
            out2[:, 0:n1],
            (-1, cell_size[0], cell_size[1], num_class))  #(b,4,4,2)
        scales = tf.reshape(
            out2[:,
                 n1:n2], (-1, cell_size[0], cell_size[1], box_num))  #(b,4,4,2)
        boxes = tf.reshape(
            out2[:, n2:],
            (-1, cell_size[0], cell_size[1], box_num * 4))  #(b,4,4,8)

        pred = tf.concat([class_pred, scales, boxes], 3)  #(b,4,4,12)

    return pred
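As a sanity check on the fc3 width and the output slicing, here is the arithmetic the inline comments assume (cell_size=[4, 4], num_class=2, box_num=2); illustrative only, not part of the model:

cells = 4 * 4
n1 = cells * 2                 # 32 class logits
n2 = n1 + cells * 2            # 64: 32 per-box confidence scales follow
total = cells * (2 + 2 * 5)    # 192 = 32 class + 32 scale + 128 box coords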
Example #3
def pointnet_fp_module(xyz1,
                       xyz2,
                       points1,
                       points2,
                       mlp,
                       is_training,
                       bn_decay,
                       scope,
                       bn=True,
                       weight_calc=True):
    ''' PointNet Feature Propagation (FP) Module
        Input:
            xyz1: (b, n1, 3) TF tensor
            xyz2: (b, n2, 3) TF tensor, sparser than xyz1
            points1: (b, n1, c1) TF tensor
            points2: (b, n2, c2) TF tensor
            mlp: list of int32 -- output size for MLP on each point
        Return:
            new_points: (b, n1, mlp[-1]) TF tensor
    '''
    with tf.variable_scope(scope) as sc:

        dist, idx = three_nn(xyz1, xyz2)
        dist = tf.maximum(dist, 1e-10)

        if weight_calc:
            norm = tf.reduce_sum(tf.truediv(1.0, dist), axis=2,
                                 keep_dims=True)  #(b,n,3)->(b,n,1)
            norm = tf.tile(norm, [1, 1, 3])  #(b,n,1)->(b,n,3)
            weight = tf.truediv(tf.truediv(1.0, dist), norm)  #(b,n,3)
        else:
            weight = get_weight(True, dist)

        interpolated_points = three_interpolate(points2, idx, weight)

        if points1 is not None:
            new_points1 = tf.concat(
                axis=2, values=[interpolated_points,
                                points1])  # (b, n1, c1 + c2)
        else:
            new_points1 = interpolated_points
        new_points1 = tf.expand_dims(new_points1, 2)
        for i, num_out_channel in enumerate(mlp):
            new_points1 = basic_tf.conv2d(new_points1,
                                          num_out_channel, [1, 1],
                                          padding='VALID',
                                          stride=[1, 1],
                                          bn=bn,
                                          is_training=is_training,
                                          scope='conv_%d' % (i),
                                          bn_decay=bn_decay)
        new_points1 = tf.squeeze(new_points1, [2])  # (b, n1, mlp[-1])
        return new_points1
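A hedged usage sketch (assumes the three_nn/three_interpolate custom ops are compiled; shapes are hypothetical): features on 256 sparse points are propagated up to 1024 dense points.

xyz_dense = tf.placeholder(tf.float32, (8, 1024, 3))
xyz_sparse = tf.placeholder(tf.float32, (8, 256, 3))
feat_sparse = tf.placeholder(tf.float32, (8, 256, 128))
training_flag = tf.placeholder(tf.bool, shape=())
up_feats = pointnet_fp_module(xyz_dense, xyz_sparse, None, feat_sparse,
                              [128, 64], training_flag, None,
                              scope='fp_demo')  # (8, 1024, 64)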
Example #4
def input_down_size_unit(img,
                         c_out1,
                         c_out2,
                         is_training,
                         bn_decay,
                         scope,
                         bn=True):

    img1 = basic_tf.conv2d(img,
                           c_out1, [3, 3],
                           padding='SAME',
                           stride=[1, 1],
                           bn=bn,
                           is_training=is_training,
                           scope='conv_input0',
                           bn_decay=bn_decay)  # stride-1 conv; the pooling below downsizes

    img1 = basic_tf.max_pool2d(img1, [3, 3],
                               scope='max_pool_input0',
                               stride=[2, 2],
                               padding='SAME')

    img1 = basic_tf.conv2d(img1,
                           c_out2, [3, 3],
                           padding='SAME',
                           stride=[1, 1],
                           bn=bn,
                           is_training=is_training,
                           scope='conv_input1',
                           bn_decay=bn_decay)  # stride-1 conv; the pooling below downsizes

    img1 = basic_tf.max_pool2d(img1, [3, 3],
                               scope='max_pool_input1',
                               stride=[2, 2],
                               padding='SAME')

    return img1
Example #5
def basic_detectModel(img, is_training, bn_decay, num_class):  # 512x512 input -> 4x4 feature grid

    with tf.variable_scope('conv_unit1_G'):

        out = basic_tf.conv2d(img, 16, [3, 3], 'conv_11', [1, 1], 'SAME')
        out = basic_tf.max_pool2d(out, [2, 2], 'maxpool_11', [2, 2], 'SAME')

        out = basic_tf.conv2d(out, 32, [3, 3], 'conv_12', [1, 1], 'SAME')
        out = basic_tf.max_pool2d(out, [2, 2], 'maxpool_12', [2, 2], 'SAME')

        out = basic_tf.conv2d(out, 64, [3, 3], 'conv_13', [1, 1], 'SAME')
        out = basic_tf.max_pool2d(out, [2, 2], 'maxpool_13', [2, 2], 'SAME')

        out = basic_tf.conv2d(out, 128, [3, 3], 'conv_14', [1, 1], 'SAME')
        out = basic_tf.max_pool2d(out, [2, 2], 'maxpool_14', [2, 2], 'SAME')

        out = basic_tf.conv2d(out, 256, [3, 3], 'conv_15', [1, 1], 'SAME')
        out = basic_tf.max_pool2d(out, [2, 2], 'maxpool_15', [2, 2], 'SAME')

        out = basic_tf.conv2d(out, 512, [3, 3], 'conv_16', [1, 1], 'SAME')
        out = basic_tf.max_pool2d(out, [2, 2], 'maxpool_16', [2, 2], 'SAME')

        out = basic_tf.max_pool2d(out, [2, 2], 'maxpool_17', [2, 2], 'SAME')
    with tf.variable_scope('conv_unit2'):

        out1 = basic_tf.conv2d(out, 1024, [3, 3], 'conv_21', [1, 1], 'SAME')
        out1 = basic_tf.conv2d(out1, 512, [1, 1], 'conv_22', [1, 1], 'SAME')
        out1 = basic_tf.avg_pool2d(out1, [2, 2], 'pre_avepool', [2, 2], 'SAME')
    with tf.variable_scope('fully_connected_unit_G'):

        out2 = tf.reshape(out1, (int(out1.get_shape()[0]), -1))  # (b, 2048)
        #out2 = basic_tf.fully_connected(out2,4096,'fc1')
        #out2 = basic_tf.dropout(out2,is_training,'dp1',0.5)

        out2 = basic_tf.fully_connected(out2, 1024, 'fc2')
        out2 = basic_tf.dropout(out2, is_training, 'dp2', 0.5)

        out2 = basic_tf.fully_connected(out2, 128, 'fc3')
        out2 = basic_tf.dropout(out2, is_training, 'dp3', 0.5)

    with tf.variable_scope('output_unit_G'):

        pred = basic_tf.fully_connected(out2, (num_class + 4),
                                        'fc4',
                                        activation_fn=None)

    return pred
Example #6
def feature_transform_net(inputs, mlp, is_training, bn_decay=None, K=64):
    """ Feature Transform Net, input is BxNx1xK
        mlp:list of output_channels
        Return:
             transformed_inputs (b,n,k)"""
    b = xyz.get_shape()[0].value
    n = xyz.get_shape()[1].value

    net = inputs
    for i, num_out_channel in enumerate(mlp):
        net = basic_tf.conv2d(net,
                              num_out_channel, [1, 1],
                              padding='VALID',
                              stride=[1, 1],
                              bn=True,
                              is_training=is_training,
                              scope='tconv%d' % (i + 1),
                              bn_decay=bn_decay)

    net = basic_tf.max_pool2d(net, [n, 1], padding='VALID', scope='tmaxpool')

    net = tf.reshape(net, [b, -1])
    net = basic_tf.fully_connected(net,
                                   512,
                                   bn=True,
                                   is_training=is_training,
                                   scope='tfc1',
                                   bn_decay=bn_decay)
    net = basic_tf.fully_connected(net,
                                   256,
                                   bn=True,
                                   is_training=is_training,
                                   scope='tfc2',
                                   bn_decay=bn_decay)

    with tf.variable_scope('transform_feat') as sc:
        weights = tf.get_variable('weights', [256, K * K],
                                  initializer=tf.constant_initializer(0.0),
                                  dtype=tf.float32)
        biases = tf.get_variable('biases', [K * K],
                                 initializer=tf.constant_initializer(0.0),
                                 dtype=tf.float32)
        biases += tf.constant(np.eye(K).flatten(), dtype=tf.float32)
        transform = tf.matmul(net, weights)
        transform = tf.nn.bias_add(transform, biases)

    transform = tf.reshape(transform, [b, K, K])
    # squeeze BxNx1xK -> BxNxK, then (b, n, K) x (b, K, K) -> (b, n, K)
    transformed_inputs = tf.matmul(tf.squeeze(inputs, axis=[2]), transform)
    return transformed_inputs
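A hedged usage sketch (hypothetical shapes; K must match the last input dimension):

point_feats = tf.placeholder(tf.float32, (8, 1024, 1, 64))  # BxNx1xK
training_flag = tf.placeholder(tf.bool, shape=())
aligned = feature_transform_net(point_feats, [64, 128, 1024],
                                training_flag, K=64)  # (8, 1024, 64)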
Example #7
def brute_classify(img, num_class, is_training, bn_decay):  # 512x512 input -> 4x4 feature grid

    with tf.variable_scope('conv_unit1'):

        out = basic_tf.conv2d(img, 16, [3, 3], 'conv_11', [1, 1], 'SAME')
        out = basic_tf.max_pool2d(out, [2, 2], 'maxpool_11', [2, 2], 'SAME')

        out = basic_tf.conv2d(out, 32, [3, 3], 'conv_12', [1, 1], 'SAME')
        out = basic_tf.max_pool2d(out, [2, 2], 'maxpool_12', [2, 2], 'SAME')

        out = basic_tf.conv2d(out, 64, [3, 3], 'conv_13', [1, 1], 'SAME')
        out = basic_tf.max_pool2d(out, [2, 2], 'maxpool_13', [2, 2], 'SAME')

        out = basic_tf.conv2d(out, 128, [3, 3], 'conv_14', [1, 1], 'SAME')
        out = basic_tf.max_pool2d(out, [2, 2], 'maxpool_14', [2, 2], 'SAME')

        out = basic_tf.conv2d(out, 256, [3, 3], 'conv_15', [1, 1], 'SAME')
        out = basic_tf.max_pool2d(out, [2, 2], 'maxpool_15', [2, 2], 'SAME')

        out = basic_tf.conv2d(out, 512, [3, 3], 'conv_16', [1, 1], 'SAME')
        out = basic_tf.max_pool2d(out, [2, 2], 'maxpool_16', [2, 2], 'SAME')

        out = basic_tf.max_pool2d(out, [2, 2], 'maxpool_17', [2, 2], 'SAME')
    with tf.variable_scope('conv_unit2'):

        out1 = basic_tf.conv2d(out, 1024, [3, 3], 'conv_21', [1, 1], 'SAME')
        out1 = basic_tf.conv2d(out1, 512, [1, 1], 'conv_22', [1, 1], 'SAME')
        out1 = basic_tf.avg_pool2d(out1, [2, 2], 'pre_avepool', [2, 2], 'SAME')

    with tf.variable_scope('fully_connected_unit'):

        out2 = tf.reshape(out1, (int(out1.get_shape()[0]), -1))  # (b, 2048)
        out2 = basic_tf.fully_connected(out2, 1024, 'fc1')
        out2 = basic_tf.dropout(out2, is_training, 'dp1', 0.5)

        out2 = basic_tf.fully_connected(out2, 128, 'fc2')
        out2 = basic_tf.dropout(out2, is_training, 'dp2', 0.5)

        pred = basic_tf.fully_connected(out2, num_class, 'fc3')
    print(pred)
    return pred
Example #8
def output_down_size_unit(input, cout, is_training, bn_decay, scope, bn=True):

    b = input.get_shape()[0].value
    out = basic_tf.avg_pool2d(input, [3, 3],
                              scope='ave_pool_output',
                              stride=[1, 1],
                              padding='SAME')

    out = basic_tf.conv2d(out,
                          cout, [1, 1],
                          padding='SAME',
                          stride=[1, 1],
                          bn=bn,
                          is_training=is_training,
                          scope='conv_output1',
                          bn_decay=bn_decay)  # 1x1 conv: channel reduction
    out = basic_tf.max_pool2d(out, [3, 3],
                              scope='max_pool_output',
                              stride=[2, 2],
                              padding='SAME')

    out = tf.reshape(out, (b, -1))  # flatten per batch element
    return out
Example #9
def wnet_model_unit(input,
                    mlp_list,
                    kernel_size_list,
                    is_training,
                    bn_decay,
                    scope,
                    bn=True,
                    maxpool=True):

    bf = input.get_shape()[-1].value
    with tf.variable_scope(scope) as sc:
        output = basic_tf.max_pool2d(input, [3, 3], 'maxpool_input', [1, 1],
                                     'SAME')
        output = basic_tf.conv2d(output,
                                 bf // 3, [1, 1],
                                 padding='SAME',
                                 stride=[1, 1],
                                 bn=bn,
                                 is_training=is_training,
                                 scope='conv_input',
                                 bn_decay=bn_decay)  # 1x1 conv: filter the pooled input down to bf // 3 channels
        for i, ks in enumerate(kernel_size_list):
            kernel_h = ks[0]
            kernel_w = ks[1]
            out = input  #conv from down_sized input
            c_out = mlp_list[i]
            if kernel_h == 1 and kernel_w == 1:
                out = basic_tf.conv2d(
                    out,
                    c_out, [1, 1],
                    padding='SAME',
                    stride=[1, 1],
                    bn=bn,
                    is_training=is_training,
                    scope='conv_p1%d' % (i),
                    bn_decay=bn_decay)  #conv from down_sized input
            else:
                out = basic_tf.conv2d(
                    out,
                    c_out // 2, [1, 1],
                    padding='SAME',
                    stride=[1, 1],
                    bn=bn,
                    is_training=is_training,
                    scope='conv_p0%d' % (i),
                    bn_decay=bn_decay)  #conv from down_sized input
                out = basic_tf.conv2d(
                    out,
                    c_out, [kernel_h, kernel_w],
                    padding='SAME',
                    stride=[1, 1],
                    bn=bn,
                    is_training=is_training,
                    scope='conv_p1%d' % (i),
                    bn_decay=bn_decay)  #conv from down_sized input
            #print(output.shape)
            output = tf.concat([output, out], -1)

        if maxpool:
            output = basic_tf.max_pool2d(output, [3, 3], 'max_pool', [2, 2],
                                         'SAME')

            #(b,h2,w2,sum[mlp])
    print(output.shape, 'one turn finished')

    return output
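A hedged usage sketch (hypothetical shapes): with a 96-channel input, the concatenated output carries 96 // 3 filtered channels plus one branch per kernel size, and the final max pool halves the spatial size.

feats = tf.placeholder(tf.float32, (8, 32, 32, 96))
training_flag = tf.placeholder(tf.bool, shape=())
out = wnet_model_unit(feats, [32, 64, 64],
                      [[1, 1], [3, 3], [5, 5]],
                      training_flag, None,
                      scope='wnet_demo')  # (8, 16, 16, 32 + 32 + 64 + 64)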
Example #10
def pointnet_AB_module_msg(xyz,
                           points,
                           m,
                           r_list,
                           ns_list,
                           mlp_list,
                           is_training,
                           bn_decay,
                           scope,
                           bn=True,
                           use_xyz=True):
    ''' PointNet Set Abstraction (SA) module with Multi-Scale Grouping (MSG)
        Input:
            xyz: (b, n, 3) TF tensor
            points: (b, n, c) TF tensor
            m: int32 -- #points sampled by farthest point sampling
            r_list: list of float32 -- search radius of each local region
            ns_list: list of int32 -- how many points in each local region
            mlp_list: list of lists of int32 -- output sizes for the MLP on each point
            use_xyz: bool, if True concat XYZ with local point features, otherwise use point features only
        Return:
            new_xyz: (b, m, 3) TF tensor
            new_points: (b, m, \sum_k{mlp_list[k][-1]}) TF tensor
    '''
    with tf.variable_scope(scope) as sc:

        # sampling

        new_xyz = gather_point(xyz, farthest_point_sample(m, xyz))  # (b, m, 3)
        new_points_list = []
        print('sample ok')
        #grouping & convolution mlp
        for i in range(len(r_list)):
            print('grouping', i)
            #grouping
            r = r_list[i]
            ns = ns_list[i]
            idx, pts_cnt = query_ball_point(r, ns, xyz, new_xyz)
            grouped_xyz = group_point(xyz, idx)  # (b, m, ns, 3)
            grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2),
                                   [1, 1, ns, 1])  # translate to local frames: (b, m, ns, 3)
            if points is not None:
                grouped_points = group_point(points, idx)  #(b,m,ns[i],c)
                if use_xyz:
                    grouped_points = tf.concat([grouped_points, grouped_xyz],
                                               axis=-1)
            else:
                grouped_points = grouped_xyz
            print('convolution')
            #convolutional layers
            for j, num_out_channel in enumerate(mlp_list[i]):
                print('conv', j)
                grouped_points = basic_tf.conv2d(
                    grouped_points,
                    num_out_channel, [1, 1],
                    padding='VALID',
                    stride=[1, 1],
                    bn=bn,
                    is_training=is_training,
                    scope='conv%d_%d' % (i, j),
                    bn_decay=bn_decay)  #(b,m,ns[i],mlp[i][-1])

            new_points = tf.reduce_max(grouped_points,
                                       axis=[2])  #(b,m,mlp[i][-1])
            new_points_list.append(new_points)

        new_points_concat = tf.concat(new_points_list,
                                      axis=-1)  # (b, m, sum_k mlp_list[k][-1])
        print('one turn')
        return new_xyz, new_points_concat
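A hedged usage sketch (assumes the sampling/grouping custom ops are compiled; shapes hypothetical): two radii produce two feature branches that are concatenated per sampled point.

xyz = tf.placeholder(tf.float32, (8, 1024, 3))
training_flag = tf.placeholder(tf.bool, shape=())
new_xyz, new_feats = pointnet_AB_module_msg(
    xyz, None, 512, [0.1, 0.2], [16, 32], [[32, 64], [64, 128]],
    training_flag, None, scope='msg_demo')  # new_feats: (8, 512, 64 + 128)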
Example #11
def pointnet_AB_module(xyz,
                       points,
                       m,
                       r,
                       ns,
                       mlp,
                       mlp2,
                       group_all,
                       is_training,
                       bn_decay,
                       scope,
                       bn=True,
                       pooling='max',
                       tnet_spec=None,
                       knn=False,
                       use_xyz=True):
    ''' PointNet Set Abstraction (SA) Module
        Input:
            xyz: (b, n, 3) TF tensor
            points: (b, n, c) TF tensor
            m: int32 -- #points sampled by farthest point sampling
            r: float32 -- search radius in local region
            ns: int32 -- how many points in each local region
            mlp: list of int32 -- output size for MLP on each point
            mlp2: list of int32 -- output size for MLP on each region
            group_all: bool -- if True, group all points into one region,
                overriding the m, r and ns settings
            use_xyz: bool, if True concat XYZ with local point features, otherwise use point features only
        Return:
            new_xyz: (b, m, 3) TF tensor
            new_points: (b, m, mlp[-1] or mlp2[-1]) TF tensor
            idx: (b, m, ns) int32 -- indices for local regions
    '''
    with tf.variable_scope(scope) as sc:

        #Sampling&Grouping

        if group_all:
            ns = xyz.get_shape()[1].value
            new_xyz, new_points, idx, grouped_xyz = sample_and_group_all(
                xyz, points, use_xyz)
        else:
            new_xyz, new_points, idx, grouped_xyz = sample_and_group(
                m, r, ns, xyz, points, tnet_spec, knn,
                use_xyz)  #here we got the idx from sampling&grouping
        print('convolution')
        #convolutional layer mlp(handling the new_points we got)

        for i, num_out_channel in enumerate(mlp):
            print('conv', i)
            new_points = basic_tf.conv2d(new_points,
                                         num_out_channel, [1, 1],
                                         padding='VALID',
                                         stride=[1, 1],
                                         bn=bn,
                                         is_training=is_training,
                                         scope='conv%d' % (i),
                                         bn_decay=bn_decay)

        #pooling
        print('pooling')
        if pooling == 'avg':
            new_points = basic_tf.avg_pool2d(new_points, [1, ns],
                                             stride=[1, 1],
                                             padding='VALID',
                                             scope='avgpool1')
        elif pooling == 'weighted_avg':
            with tf.variable_scope('weighted_avg1'):
                dists = tf.norm(grouped_xyz, axis=-1, ord=2, keep_dims=True)
                exp_dists = tf.exp(-dists * 5)
                weights = exp_dists / tf.reduce_sum(
                    exp_dists, axis=2, keep_dims=True)  # (b, m, ns, 1)
                new_points *= weights  # (b, m, ns, mlp[-1])
                new_points = tf.reduce_sum(new_points, axis=2, keep_dims=True)
        elif pooling == 'max':
            new_points = tf.reduce_max(new_points, axis=[2], keep_dims=True)
        elif pooling == 'min':
            new_points = basic_tf.max_pool2d(-1 * new_points, [1, ns],
                                             stride=[1, 1],
                                             padding='VALID',
                                             scope='minpool1')  # max of the negated tensor; note the sign is not flipped back
        elif pooling == 'max_and_avg':
            max_points = basic_tf.max_pool2d(new_points, [1, ns],
                                             stride=[1, 1],
                                             padding='VALID',
                                             scope='maxpool1')
            avg_points = basic_tf.avg_pool2d(new_points, [1, ns],
                                             stride=[1, 1],
                                             padding='VALID',
                                             scope='avgpool1')
            new_points = tf.concat([max_points, avg_points], axis=-1)

        #convolutional layer mlp2

        if mlp2 is None: mlp2 = []
        for i, num_out_channel in enumerate(mlp2):
            new_points = basic_tf.conv2d(new_points,
                                         num_out_channel, [1, 1],
                                         padding='VALID',
                                         stride=[1, 1],
                                         bn=bn,
                                         is_training=is_training,
                                         scope='conv_post_%d' % (i),
                                         bn_decay=bn_decay)

        #prepare the result

        new_points = tf.squeeze(new_points, [2])  # (b, m, mlp[-1] or mlp2[-1])
        print('1 turn')
        return new_xyz, new_points, idx
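A hedged usage sketch (assumes sample_and_group and the custom ops are available; shapes hypothetical):

xyz = tf.placeholder(tf.float32, (8, 1024, 3))
training_flag = tf.placeholder(tf.bool, shape=())
new_xyz, new_feats, idx = pointnet_AB_module(
    xyz, None, 512, 0.2, 32, [64, 64, 128], None, False,
    training_flag, None, scope='sa_demo')  # new_feats: (8, 512, 128)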
Example #12
def input_transform_net(xyz, mlp, is_training, bn_decay=None, K=3):
    """ Input (XYZ) Transform Net, input is BxNx3 gray image
        mlp:list of output_channels
        Return:           
            transformed_xyz (b,n,3)"""
    b = xyz.get_shape()[0].value
    n = xyz.get_shape()[1].value
    print('transing')
    input = tf.expand_dims(xyz, -1)  #(b,n,3,1)
    for i, num_out_channel in enumerate(mlp):
        if i == 0:
            net = basic_tf.conv2d(input,
                                  num_out_channel, [1, 3],
                                  padding='VALID',
                                  stride=[1, 1],
                                  bn=True,
                                  is_training=is_training,
                                  scope='tconv%d' % (i + 1),
                                  bn_decay=bn_decay)
        else:
            net = basic_tf.conv2d(net,
                                  num_out_channel, [1, 1],
                                  padding='VALID',
                                  stride=[1, 1],
                                  bn=True,
                                  is_training=is_training,
                                  scope='tconv%d' % (i + 1),
                                  bn_decay=bn_decay)  # (b, n, 1, mlp[i])

    net = basic_tf.max_pool2d(net, [n, 1], padding='VALID',
                              scope='tmaxpool')  # (b, 1, 1, mlp[-1])

    net = tf.reshape(net, [b, -1])  # (b, mlp[-1])
    net = basic_tf.fully_connected(net,
                                   512,
                                   bn=True,
                                   is_training=is_training,
                                   scope='tfc1',
                                   bn_decay=bn_decay)  #(b,512)
    net = basic_tf.fully_connected(net,
                                   256,
                                   bn=True,
                                   is_training=is_training,
                                   scope='tfc2',
                                   bn_decay=bn_decay)  #(b,256)
    print('transing2')
    with tf.variable_scope('transform_XYZ') as sc:
        assert (K == 3)
        weights = tf.get_variable('weights', [256, 3 * K],
                                  initializer=tf.constant_initializer(0.0),
                                  dtype=tf.float32)
        biases = tf.get_variable('biases', [3 * K],
                                 initializer=tf.constant_initializer(0.0),
                                 dtype=tf.float32)
        biases += tf.constant([1, 0, 0, 0, 1, 0, 0, 0, 1], dtype=tf.float32)
        transform = tf.matmul(net, weights)
        transform = tf.nn.bias_add(transform, biases)  # (b, 3*K)

    transform = tf.reshape(transform, [b, 3, K])
    transformed_xyz = tf.matmul(xyz, transform)
    return transformed_xyz
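A hedged usage sketch (the batch size must be static for the reshape; shapes hypothetical):

xyz = tf.placeholder(tf.float32, (8, 1024, 3))
training_flag = tf.placeholder(tf.bool, shape=())
aligned_xyz = input_transform_net(xyz, [64, 128, 1024],
                                  training_flag)  # (8, 1024, 3)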
Example #13
def color_net(rgb, is_training, bn_decay=None):

    with tf.variable_scope('input_layer'):
        h = rgb.get_shape()[1].value
        w = rgb.get_shape()[2].value
        b = rgb.get_shape()[0].value

        og = [h, w]
        end_data = {}
        end_data['rgb_data'] = rgb
        #end_data['srgb_data'] = srgb  # both inputs are already normalized
        out1 = basic_tf.conv2d(rgb, 96, [1, 1], 'input_conv', [1, 1], 'SAME')
        out1 = basic_tf.max_pool2d(out1, [2, 2], 'input_pool', [1, 1], 'SAME')

    with tf.variable_scope('intermediate_layer'):
        # list_of_kernel and list_of_mlplist are module-level configuration lists
        for i, kernels in enumerate(list_of_kernel):
            mlps = list_of_mlplist[i]
            out1 = bm.ssc_color_info_abstraction(out1,
                                                 mlps,
                                                 is_training=is_training,
                                                 bn_decay=bn_decay,
                                                 scope='ssc_section_%d' % (i),
                                                 kernel_size=kernels,
                                                 bn=True)
            if i == 0:
                hyper_column = out1
            else:
                hyper_column = tf.concat([hyper_column, out1], -1)

        hyper_column = basic_tf.avg_pool2d(hyper_column, [2, 2],
                                           'medium_avepool', [1, 1], 'SAME')
        c = hyper_column.get_shape()[-1].value
        print(hyper_column.shape)
        hyper_column = tf.reshape(hyper_column, (b * h * w, c))

    with tf.variable_scope('output_layer'):

        out = basic_tf.fully_connected(hyper_column,
                                       256,
                                       bn=True,
                                       is_training=is_training,
                                       scope='fc2',
                                       bn_decay=bn_decay)
        out = basic_tf.dropout(out,
                               keep_prob=0.5,
                               is_training=is_training,
                               scope='dp2')
        out = basic_tf.fully_connected(out,
                                       64,
                                       bn=True,
                                       is_training=is_training,
                                       scope='fc3',
                                       bn_decay=bn_decay)
        out = basic_tf.dropout(out,
                               keep_prob=0.5,
                               is_training=is_training,
                               scope='dp3')
        out = basic_tf.fully_connected(out,
                                       3,
                                       bn=True,
                                       is_training=is_training,
                                       scope='fc4',
                                       bn_decay=bn_decay)

        pred = tf.reshape(out, (b, h, w, 3))

    return pred, end_data
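A hedged end-to-end sketch (assumes list_of_kernel and list_of_mlplist are defined at module level and feed ssc_color_info_abstraction above; shapes hypothetical):

rgb = tf.placeholder(tf.float32, (8, 64, 64, 3))
training_flag = tf.placeholder(tf.bool, shape=())
pred, end_data = color_net(rgb, training_flag)  # pred: (8, 64, 64, 3)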