import tensorflow as tf

import basic_tf  # project-local layer helpers (conv2d, max_pool2d, fully_connected, dropout, ...); import path assumed from usage
# sample_and_group, sample_and_group_all, bm, list_of_kernel and list_of_mlplist are
# referenced below and are assumed to be imported/defined elsewhere in this module.


def basic_detectModel(img, is_training, bn_decay, num_class):  # 512 -> 4
    # Backbone: six conv + max-pool stages plus one extra pooling, shrinking the input resolution by 2^7.
    with tf.variable_scope('conv_unit1_G'):
        out = basic_tf.conv2d(img, 16, [3, 3], 'conv_11', [1, 1], 'SAME')
        out = basic_tf.max_pool2d(out, [2, 2], 'maxpool_11', [2, 2], 'SAME')
        out = basic_tf.conv2d(out, 32, [3, 3], 'conv_12', [1, 1], 'SAME')
        out = basic_tf.max_pool2d(out, [2, 2], 'maxpool_12', [2, 2], 'SAME')
        out = basic_tf.conv2d(out, 64, [3, 3], 'conv_13', [1, 1], 'SAME')
        out = basic_tf.max_pool2d(out, [2, 2], 'maxpool_13', [2, 2], 'SAME')
        out = basic_tf.conv2d(out, 128, [3, 3], 'conv_14', [1, 1], 'SAME')
        out = basic_tf.max_pool2d(out, [2, 2], 'maxpool_14', [2, 2], 'SAME')
        out = basic_tf.conv2d(out, 256, [3, 3], 'conv_15', [1, 1], 'SAME')
        out = basic_tf.max_pool2d(out, [2, 2], 'maxpool_15', [2, 2], 'SAME')
        out = basic_tf.conv2d(out, 512, [3, 3], 'conv_16', [1, 1], 'SAME')
        out = basic_tf.max_pool2d(out, [2, 2], 'maxpool_16', [2, 2], 'SAME')
        out = basic_tf.max_pool2d(out, [2, 2], 'maxpool_17', [2, 2], 'SAME')
    with tf.variable_scope('conv_unit2'):
        out1 = basic_tf.conv2d(out, 1024, [3, 3], 'conv_21', [1, 1], 'SAME')
        out1 = basic_tf.conv2d(out1, 512, [1, 1], 'conv_22', [1, 1], 'SAME')
        out1 = basic_tf.avg_pool2d(out1, [2, 2], 'pre_avepool', [2, 2], 'SAME')
    with tf.variable_scope('fully_connected_unit_G'):
        out2 = tf.reshape(out1, (out1.get_shape()[0].value, -1))  # b, 8192
        #out2 = basic_tf.fully_connected(out2, 4096, 'fc1')
        #out2 = basic_tf.dropout(out2, is_training, 'dp1', 0.5)
        out2 = basic_tf.fully_connected(out2, 1024, 'fc2')
        out2 = basic_tf.dropout(out2, is_training, 'dp2', 0.5)
        out2 = basic_tf.fully_connected(out2, 128, 'fc3')
        out2 = basic_tf.dropout(out2, is_training, 'dp3', 0.5)
    with tf.variable_scope('output_unit_G'):
        # num_class class scores plus 4 bounding-box parameters, no output activation
        pred = basic_tf.fully_connected(out2, (num_class + 4), 'fc4', activation_fn=None)
    return pred

def brute_classify(img, num_class, is_training, bn_decay):  # 512 -> 4
    # Same backbone as basic_detectModel, but with a pure classification head.
    with tf.variable_scope('conv_unit1'):
        out = basic_tf.conv2d(img, 16, [3, 3], 'conv_11', [1, 1], 'SAME')
        out = basic_tf.max_pool2d(out, [2, 2], 'maxpool_11', [2, 2], 'SAME')
        out = basic_tf.conv2d(out, 32, [3, 3], 'conv_12', [1, 1], 'SAME')
        out = basic_tf.max_pool2d(out, [2, 2], 'maxpool_12', [2, 2], 'SAME')
        out = basic_tf.conv2d(out, 64, [3, 3], 'conv_13', [1, 1], 'SAME')
        out = basic_tf.max_pool2d(out, [2, 2], 'maxpool_13', [2, 2], 'SAME')
        out = basic_tf.conv2d(out, 128, [3, 3], 'conv_14', [1, 1], 'SAME')
        out = basic_tf.max_pool2d(out, [2, 2], 'maxpool_14', [2, 2], 'SAME')
        out = basic_tf.conv2d(out, 256, [3, 3], 'conv_15', [1, 1], 'SAME')
        out = basic_tf.max_pool2d(out, [2, 2], 'maxpool_15', [2, 2], 'SAME')
        out = basic_tf.conv2d(out, 512, [3, 3], 'conv_16', [1, 1], 'SAME')
        out = basic_tf.max_pool2d(out, [2, 2], 'maxpool_16', [2, 2], 'SAME')
        out = basic_tf.max_pool2d(out, [2, 2], 'maxpool_17', [2, 2], 'SAME')
    with tf.variable_scope('conv_unit2'):
        out1 = basic_tf.conv2d(out, 1024, [3, 3], 'conv_21', [1, 1], 'SAME')
        out1 = basic_tf.conv2d(out1, 512, [1, 1], 'conv_22', [1, 1], 'SAME')
        out1 = basic_tf.avg_pool2d(out1, [2, 2], 'pre_avepool', [2, 2], 'SAME')
    with tf.variable_scope('fully_connected_unit'):
        out2 = tf.reshape(out1, (out1.get_shape()[0].value, -1))  # b, 4096
        out2 = basic_tf.fully_connected(out2, 1024, 'fc1')
        out2 = basic_tf.dropout(out2, is_training, 'dp1', 0.5)
        out2 = basic_tf.fully_connected(out2, 128, 'fc2')
        out2 = basic_tf.dropout(out2, is_training, 'dp2', 0.5)
        pred = basic_tf.fully_connected(out2, num_class, 'fc3')
    print(pred)
    return pred

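# A minimal usage sketch for the two plain-CNN heads above, assuming a TF 1.x
# graph and static shapes. The _demo_* name, batch size, input resolution and
# class count are illustrative assumptions, not values from the training script.
def _demo_build_detect_and_classify():
    batch, size, num_class = 8, 512, 10  # assumed values for illustration
    img_pl = tf.placeholder(tf.float32, shape=(batch, size, size, 3))
    is_training_pl = tf.placeholder(tf.bool, shape=())
    # detection head: per-image class scores plus 4 box parameters
    det_pred = basic_detectModel(img_pl, is_training_pl, bn_decay=None, num_class=num_class)
    # classification head: per-image class scores only
    cls_pred = brute_classify(img_pl, num_class, is_training_pl, bn_decay=None)
    return det_pred, cls_pred  # shapes: (batch, num_class + 4), (batch, num_class)
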
def output_down_size_unit(input, cout, is_training, bn_decay, scope, bn=True):
    # Smooth with average pooling, project to cout channels with a 1x1 conv,
    # halve the spatial resolution, then flatten to (b, -1).
    b = input.get_shape()[0].value
    with tf.variable_scope(scope):
        out = basic_tf.avg_pool2d(input, [3, 3], scope='ave_pool_output',
                                  stride=[1, 1], padding='SAME')
        out = basic_tf.conv2d(out, cout, [1, 1], padding='SAME', stride=[1, 1],
                              bn=bn, is_training=is_training,
                              scope='conv_output1', bn_decay=bn_decay)
        # down_size
        out = basic_tf.max_pool2d(out, [3, 3], scope='max_pool_output',
                                  stride=[2, 2], padding='SAME')
        out = tf.reshape(out, (b, -1))  # (-1, 2048)
    return out

def pointnet_AB_module(xyz, points, m, r, ns, mlp, mlp2, group_all, is_training,
                       bn_decay, scope, bn=True, pooling='max', tnet_spec=None,
                       knn=False, use_xyz=True):
    ''' PointNet Set Abstraction (SA) Module
        Input:
            xyz: (b, n, 3) TF tensor
            points: (b, n, c) TF tensor
            m: int32 -- number of points sampled by farthest point sampling
            r: float32 -- search radius of each local region
            ns: int32 -- number of points in each local region
            mlp: list of int32 -- output sizes for the MLP on each point
            mlp2: list of int32 -- output sizes for the MLP on each region
            group_all: bool -- if True, group all points into one point cloud,
                overriding the m, r and ns settings
            use_xyz: bool -- if True, concat XYZ with the local point features,
                otherwise use the point features only
        Return:
            new_xyz: (b, m, 3) TF tensor
            new_points: (b, m, mlp[-1] or mlp2[-1]) TF tensor
            idx: (b, m, ns) int32 -- indices of the points in each local region
    '''
    with tf.variable_scope(scope) as sc:
        # Sampling & grouping; this is where idx comes from
        if group_all:
            ns = xyz.get_shape()[1].value
            new_xyz, new_points, idx, grouped_xyz = sample_and_group_all(xyz, points, use_xyz)
        else:
            new_xyz, new_points, idx, grouped_xyz = sample_and_group(
                m, r, ns, xyz, points, tnet_spec, knn, use_xyz)

        # Point-wise convolutions (mlp) on the grouped points
        print('convolution')
        for i, num_out_channel in enumerate(mlp):
            print('conv', i)
            new_points = basic_tf.conv2d(new_points, num_out_channel, [1, 1],
                                         padding='VALID', stride=[1, 1], bn=bn,
                                         is_training=is_training,
                                         scope='conv%d' % (i), bn_decay=bn_decay)

        # Pooling over each local region
        print('pooling')
        if pooling == 'avg':
            new_points = basic_tf.avg_pool2d(new_points, [1, ns], stride=[1, 1],
                                             padding='VALID', scope='avgpool1')
        elif pooling == 'weighted_avg':
            with tf.variable_scope('weighted_avg1'):
                dists = tf.norm(grouped_xyz, axis=-1, ord=2, keep_dims=True)
                exp_dists = tf.exp(-dists * 5)
                weights = exp_dists / tf.reduce_sum(exp_dists, axis=2,
                                                    keep_dims=True)  # (b, m, ns, 1)
                new_points *= weights  # (b, m, ns, mlp[-1])
                new_points = tf.reduce_sum(new_points, axis=2, keep_dims=True)
        elif pooling == 'max':
            new_points = tf.reduce_max(new_points, axis=[2], keep_dims=True)
        elif pooling == 'min':
            new_points = basic_tf.max_pool2d(-1 * new_points, [1, ns], stride=[1, 1],
                                             padding='VALID', scope='minpool1')
        elif pooling == 'max_and_avg':
            max_points = basic_tf.max_pool2d(new_points, [1, ns], stride=[1, 1],
                                             padding='VALID', scope='maxpool1')
            avg_points = basic_tf.avg_pool2d(new_points, [1, ns], stride=[1, 1],
                                             padding='VALID', scope='avgpool1')
            new_points = tf.concat([max_points, avg_points], axis=-1)

        # Point-wise convolutions (mlp2) on the pooled regions
        if mlp2 is None:
            mlp2 = []
        for i, num_out_channel in enumerate(mlp2):
            new_points = basic_tf.conv2d(new_points, num_out_channel, [1, 1],
                                         padding='VALID', stride=[1, 1], bn=bn,
                                         is_training=is_training,
                                         scope='conv_post_%d' % (i), bn_decay=bn_decay)

        # Drop the singleton region axis left by the pooling
        new_points = tf.squeeze(new_points, [2])  # (b, m, mlp2[-1])
        print('1 turn')
        return new_xyz, new_points, idx

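# A minimal sketch of how the set-abstraction module above is typically stacked,
# PointNet++-style: each stage samples fewer centroids (m), searches a larger
# radius (r), and produces higher-dimensional features. The _demo_* name and the
# m/r/ns/mlp values are illustrative assumptions; passing points=None for the
# first stage assumes sample_and_group accepts it, as in the PointNet++ utilities.
def _demo_stack_ab_modules(xyz, is_training, bn_decay):
    # xyz: (b, n, 3) input point cloud, no extra per-point features at first
    l1_xyz, l1_points, l1_idx = pointnet_AB_module(
        xyz, None, m=512, r=0.2, ns=32, mlp=[64, 64, 128], mlp2=None,
        group_all=False, is_training=is_training, bn_decay=bn_decay, scope='ab1')
    l2_xyz, l2_points, l2_idx = pointnet_AB_module(
        l1_xyz, l1_points, m=128, r=0.4, ns=64, mlp=[128, 128, 256], mlp2=None,
        group_all=False, is_training=is_training, bn_decay=bn_decay, scope='ab2')
    # final stage groups everything into a single global feature vector
    l3_xyz, l3_points, l3_idx = pointnet_AB_module(
        l2_xyz, l2_points, m=None, r=None, ns=None, mlp=[256, 512, 1024], mlp2=None,
        group_all=True, is_training=is_training, bn_decay=bn_decay, scope='ab3')
    return l3_points  # (b, 1, 1024) global feature
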
def color_net(rgb, is_training, bn_decay=None):
    with tf.variable_scope('input_layer'):
        h = rgb.get_shape()[1].value
        w = rgb.get_shape()[2].value
        b = rgb.get_shape()[0].value
        og = [h, w]
        end_data = {}
        end_data['rgb_data'] = rgb
        #end_data['srgb_data'] = srgb  # both of them have been normalized
        out1 = basic_tf.conv2d(rgb, 96, [1, 1], 'input_conv', [1, 1], 'SAME')
        out1 = basic_tf.max_pool2d(out1, [2, 2], 'input_pool', [1, 1], 'SAME')
    with tf.variable_scope('intermidate_layer'):
        # Stack the ssc abstraction blocks and concatenate their outputs into a hypercolumn.
        for i, kernels in enumerate(list_of_kernel):
            mlps = list_of_mlplist[i]
            out1 = bm.ssc_color_info_abstraction(out1, mlps, is_training=is_training,
                                                 bn_decay=bn_decay,
                                                 scope='ssc_section_%d' % (i),
                                                 kernel_size=kernels, bn=True)
            if i == 0:
                hyper_colume = out1
            else:
                hyper_colume = tf.concat([hyper_colume, out1], -1)
        hyper_colume = basic_tf.avg_pool2d(hyper_colume, [2, 2], 'medium_avepool', [1, 1], 'SAME')
        c = hyper_colume.get_shape()[-1].value
        print(hyper_colume.shape)
        # Treat every pixel as one sample for the fully connected layers below.
        hyper_colume = tf.reshape(hyper_colume, (b * h * w, c))
    with tf.variable_scope('output_layer'):
        out = basic_tf.fully_connected(hyper_colume, 256, bn=True, is_training=is_training,
                                       scope='fc2', bn_decay=bn_decay)
        out = basic_tf.dropout(out, keep_prob=0.5, is_training=is_training, scope='dp2')
        out = basic_tf.fully_connected(out, 64, bn=True, is_training=is_training,
                                       scope='fc3', bn_decay=bn_decay)
        out = basic_tf.dropout(out, keep_prob=0.5, is_training=is_training, scope='dp3')
        out = basic_tf.fully_connected(out, 3, bn=True, is_training=is_training,
                                       scope='fc4', bn_decay=bn_decay)
        # Back to a per-pixel 3-channel image
        pred = tf.reshape(out, (b, h, w, 3))
    return pred, end_data

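# A minimal usage sketch for color_net, which maps a normalized RGB image to a
# per-pixel 3-channel prediction of the same spatial size. The _demo_* name,
# batch size and resolution are illustrative assumptions, and the call relies on
# the module-level list_of_kernel / list_of_mlplist configuration and the bm
# helper module being available.
def _demo_build_color_net():
    batch, h, w = 4, 128, 128  # assumed values for illustration
    rgb_pl = tf.placeholder(tf.float32, shape=(batch, h, w, 3))
    is_training_pl = tf.placeholder(tf.bool, shape=())
    pred, end_data = color_net(rgb_pl, is_training_pl, bn_decay=None)
    return pred  # (batch, h, w, 3) per-pixel output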