# Embedding variant: returns an L2-normalized embedding plus class
# probabilities (distinct from the image classifier build_graph below).
def build_graph_embed(input_pl, is_training, keep_prob, weight_decay=0.0,
                      bn_decay=None, reuse_layers=True):
    print("\nNetwork Input: ", input_pl)
    net = tf.image.resize_images(input_pl, [cfg.im_dim, cfg.im_dim])
    # reuse_layers=True expects these variables to already exist
    # (e.g., a prior tower built with reuse=False).
    net = tf.layers.conv2d(
        inputs=net,
        filters=32,
        kernel_size=[5, 5],
        # strides=(1, 1),
        # padding='valid',
        # data_format='channels_last',
        activation=tf.nn.relu,
        use_bias=True,
        kernel_initializer=tf.contrib.layers.xavier_initializer(),
        bias_initializer=tf.zeros_initializer(),
        # kernel_regularizer=l2_reg,
        name='conv2d_layer_1',
        reuse=reuse_layers)
    # Apply batch normalization only when a decay value is supplied.
    if bn_decay:
        net = do_bn(net, is_training)
    net = tf.layers.conv2d(
        inputs=net,
        filters=48,
        kernel_size=[5, 5],
        activation=tf.nn.relu,
        use_bias=True,
        kernel_initializer=tf.contrib.layers.xavier_initializer(),
        bias_initializer=tf.zeros_initializer(),
        name='conv2d_layer_2',
        reuse=reuse_layers)
    net = tf_ops.max_pool2d(net, [3, 3], padding='VALID', scope='maxpool_1')
    if bn_decay:
        net = do_bn(net, is_training)
    net = tf.layers.conv2d(
        inputs=net,
        filters=64,
        kernel_size=[3, 3],
        activation=tf.nn.relu,
        use_bias=True,
        kernel_initializer=tf.contrib.layers.xavier_initializer(),
        bias_initializer=tf.zeros_initializer(),
        name='conv2d_layer_3',
        reuse=reuse_layers)
    if bn_decay:
        net = do_bn(net, is_training)
    net = tf.layers.conv2d(
        inputs=net,
        filters=64,
        kernel_size=[3, 3],
        activation=tf.nn.relu,
        use_bias=True,
        kernel_initializer=tf.contrib.layers.xavier_initializer(),
        bias_initializer=tf.zeros_initializer(),
        name='conv2d_layer_4',
        reuse=reuse_layers)
    net = tf_ops.max_pool2d(net, [3, 3], padding='VALID', scope='maxpool_2')
    if bn_decay:
        net = do_bn(net, is_training)
    net = tf.layers.conv2d(
        inputs=net,
        filters=128,
        kernel_size=[3, 3],
        activation=tf.nn.relu,
        use_bias=True,
        kernel_initializer=tf.contrib.layers.xavier_initializer(),
        bias_initializer=tf.zeros_initializer(),
        name='conv2d_layer_5',
        reuse=reuse_layers)
    print("\nFinal Conv Shape: ", net.shape)
    if bn_decay:
        net = do_bn(net, is_training)
    net = tf.contrib.layers.flatten(net)
    print("\nshape after flattening: ", net.shape)
    net = tf.layers.dense(
        inputs=net,
        units=512,
        activation=tf.nn.relu,
        use_bias=True,
        kernel_initializer=tf.truncated_normal_initializer(stddev=0.1),
        bias_initializer=tf.zeros_initializer(),
        kernel_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        kernel_constraint=None,
        bias_constraint=None,
        trainable=True,
        name='fc_1',
        reuse=reuse_layers)
    if bn_decay:
        net = do_bn(net, is_training)
    net = tf_ops.dropout(net, keep_prob=keep_prob,
                         is_training=is_training, scope='dp1')
    net = tf.layers.dense(
        inputs=net,
        units=128,
        # units=256,
        activation=tf.nn.relu,
        use_bias=True,
        kernel_initializer=tf.truncated_normal_initializer(stddev=0.1),
        bias_initializer=tf.zeros_initializer(),
        name='fc_2',
        reuse=reuse_layers)
    # L2-normalize the penultimate features to produce the embedding.
    embed_logits = tf.nn.l2_normalize(net, axis=-1)
    # Note: softmax is applied here, so class_logits holds class
    # probabilities rather than raw logits.
    class_logits = tf.layers.dense(
        inputs=net,
        units=cfg.num_classes,
        activation=tf.nn.softmax,
        # activation=tf.nn.sigmoid,
        use_bias=True,
        kernel_initializer=tf.truncated_normal_initializer(stddev=0.1),
        bias_initializer=tf.zeros_initializer(),
        name='classification_output',
        reuse=reuse_layers)
    tf.summary.histogram('embed_outputs', embed_logits)
    print("\nShape of logits: ", embed_logits.shape)
    return embed_logits, class_logits
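
# `do_bn` is called throughout this file but is not defined in this section.
# A minimal sketch of what it is assumed to do, given under a different name
# so it cannot shadow the real helper; wrapping tf.layers.batch_normalization
# is an assumption, not confirmed by this file.
def do_bn_sketch(net, is_training):
    # Normalize activations per batch; `training` toggles between batch
    # statistics and the stored moving averages. The train loop must also run
    # the tf.GraphKeys.UPDATE_OPS collection so those averages update.
    return tf.layers.batch_normalization(net, training=is_training)
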
def build_graph_multi(input_pl, is_training, weight_decay=0.0, keep_prob=1.0,
                      bn_decay=None):
    print("\nNetwork Input: ", input_pl)
    with tf.device('/device:GPU:1'):
        net = tf.image.resize_images(input_pl, [cfg.im_dim, cfg.im_dim])
        # Note: bn=True already applies batch norm inside tf_ops.conv2d,
        # so the do_bn calls below normalize a second time.
        net = tf_ops.conv2d(net, 32, [5, 5],
                            padding='VALID', stride=[1, 1],
                            bn=True, is_training=is_training,
                            scope='conv1', bn_decay=bn_decay)
        net = tf.nn.relu(net)
        net = do_bn(net, is_training)
        net = tf_ops.conv2d(net, 32, [5, 5],
                            padding='VALID', stride=[1, 1],
                            bn=True, is_training=is_training,
                            scope='conv1_1', bn_decay=bn_decay)
        net = tf.nn.relu(net)
        net = do_bn(net, is_training)
        net = tf_ops.conv2d(net, 48, [5, 5],
                            padding='VALID', stride=[1, 1],
                            bn=True, is_training=is_training,
                            scope='conv2', bn_decay=bn_decay)
        net = tf.nn.relu(net)
        net = do_bn(net, is_training)
        # Note: conv2_2 has no ReLU before its batch norm.
        net = tf_ops.conv2d(net, 48, [3, 3],
                            padding='VALID', stride=[1, 1],
                            bn=True, is_training=is_training,
                            scope='conv2_2', bn_decay=bn_decay)
        net = do_bn(net, is_training)
        net = tf_ops.max_pool2d(net, [3, 3], padding='VALID',
                                scope='maxpool_1')
        net = tf_ops.conv2d(net, 64, [3, 3],
                            padding='VALID', stride=[1, 1],
                            bn=True, is_training=is_training,
                            scope='conv3', bn_decay=bn_decay)
        net = tf.nn.relu(net)
        net = do_bn(net, is_training)
        net = tf_ops.conv2d(net, 64, [3, 3],
                            padding='VALID', stride=[1, 1],
                            bn=True, is_training=is_training,
                            scope='conv4', bn_decay=bn_decay)
        net = tf.nn.relu(net)
        net = do_bn(net, is_training)
    with tf.device('/device:GPU:2'):
        net = tf_ops.max_pool2d(net, [3, 3], padding='VALID',
                                scope='maxpool_2')
        net = tf_ops.conv2d(net, 128, [3, 3],
                            padding='VALID', stride=[1, 1],
                            bn=True, is_training=is_training,
                            scope='conv5', bn_decay=bn_decay)
        net = tf.nn.relu(net)
        net = do_bn(net, is_training)
        net = tf_ops.conv2d(net, 128, [3, 3],
                            padding='VALID', stride=[1, 1],
                            bn=True, is_training=is_training,
                            scope='conv6', bn_decay=bn_decay)
        print("\nfinal conv shape: ", net.shape)
        net = tf.nn.relu(net)
        net = do_bn(net, is_training)
        # Symmetric function: max pooling
        # net = tf_ops.max_pool2d(net, [3, 3], padding='VALID', scope='maxpool')
        # net = tf.reshape(net, [cfg.batch_size, -1])
        net = tf.contrib.layers.flatten(net)
        print("\nshape after flattening: ", net.shape)
        net = tf_ops.fully_connected(net, 1024, bn=True,
                                     is_training=is_training,
                                     scope='fc1', bn_decay=bn_decay)
        net = tf.nn.relu(net)
        net = do_bn(net, is_training)
        net = tf_ops.dropout(net, keep_prob=keep_prob,
                             is_training=is_training, scope='dp1')
        net = tf_ops.fully_connected(net, 512, bn=True,
                                     is_training=is_training,
                                     scope='fc2', bn_decay=bn_decay)
        net = tf.nn.relu(net)
        net = do_bn(net, is_training)
        net = tf_ops.dropout(net, keep_prob=keep_prob,
                             is_training=is_training, scope='dp2')
        net = tf_ops.fully_connected(net, cfg.num_classes, scope='fc3')
        # net = tf.nn.sigmoid(net, name="output_node")
        net = tf.nn.softmax(net, name="output_node")
    print("\nShape of logits: ", net.shape)
    return net
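
# build_graph_multi pins layers to '/device:GPU:1' and '/device:GPU:2', so it
# fails on machines without those devices unless soft placement is enabled.
# A minimal, hypothetical session setup (the 3-channel placeholder shape is
# an assumption; only the resize to cfg.im_dim is fixed by the code above):
#
#   images_pl = tf.placeholder(tf.float32, [None, cfg.im_dim, cfg.im_dim, 3])
#   is_training_pl = tf.placeholder(tf.bool, shape=())
#   probs = build_graph_multi(images_pl, is_training_pl, keep_prob=0.7)
#   config = tf.ConfigProto(allow_soft_placement=True)
#   with tf.Session(config=config) as sess:
#       sess.run(tf.global_variables_initializer())
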
def build_graph_old(input_pl, is_training, weight_decay=0.0, keep_prob=1.0,
                    bn_decay=None):
    print("\nNetwork Input: ", input_pl)
    pool_num = int(cfg.num_points / 3)
    net = tf_ops.conv2d(input_pl, 64, [1, 3],
                        padding='VALID', stride=[1, 1],
                        bn=True, is_training=is_training,
                        scope='conv1', bn_decay=bn_decay)
    print("\nshape after input: ", net.shape)
    net = tf.nn.relu(net)
    # skip_pool = tf_ops.max_pool2d(net, [pool_num, 1],
    #                               padding='VALID', scope='maxpool')
    net = tf_ops.conv2d(net, 64, [1, 1],
                        padding='VALID', stride=[1, 1],
                        bn=True, is_training=is_training,
                        scope='conv2', bn_decay=bn_decay)
    net = tf.nn.relu(net)
    net = tf_ops.conv2d(net, 64, [1, 1],
                        padding='VALID', stride=[1, 1],
                        bn=True, is_training=is_training,
                        scope='conv3', bn_decay=bn_decay)
    net = tf.nn.relu(net)
    net = tf_ops.conv2d(net, 128, [1, 1],
                        padding='VALID', stride=[1, 1],
                        bn=True, is_training=is_training,
                        scope='conv4', bn_decay=bn_decay)
    net = tf.nn.relu(net)
    net = tf_ops.conv2d(net, 1024, [1, 1],
                        padding='VALID', stride=[1, 1],
                        bn=True, is_training=is_training,
                        scope='conv5', bn_decay=bn_decay)
    net = tf.nn.relu(net)
    print("\nshape before max pool: ", net.shape)
    # Symmetric function: max pooling
    # net = tf_ops.max_pool2d(net, [pool_num, 1],
    net = tf_ops.max_pool2d(net, [cfg.num_points, 1],
                            padding='VALID', scope='maxpool')
    # skip_multiply = tf.multiply(net, skip_pool)
    # net = tf.add(net, skip_multiply)
    print("\nshape after skip pool: ", net.shape)
    net = tf.contrib.layers.flatten(net)
    print("\nshape after flattening: ", net.shape)
    net = tf_ops.fully_connected(net, 512, bn=True, is_training=is_training,
                                 scope='fc1', bn_decay=bn_decay)
    net = tf.nn.relu(net)
    net = tf_ops.dropout(net, keep_prob=keep_prob,
                         is_training=is_training, scope='dp1')
    net = tf_ops.fully_connected(net, 128, bn=True, is_training=is_training,
                                 scope='fc2', bn_decay=bn_decay)
    net = tf.nn.relu(net)
    net = tf_ops.dropout(net, keep_prob=keep_prob,
                         is_training=is_training, scope='dp2')
    net = tf_ops.fully_connected(net, cfg.num_classes, scope='fc3')
    # Note: ReLU on the class scores zeroes out negative values; the newer
    # variants apply softmax here instead.
    net = tf.nn.relu(net, name="output_node")
    print("\nShape of logits: ", net.shape)
    return net
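
# build_graph_old consumes raw point clouds rather than images: the [1, 3]
# 'VALID' kernel in conv1 collapses the xyz axis, and the [cfg.num_points, 1]
# max pool is the symmetric function that makes the network invariant to
# point order. A hypothetical call, with the PointNet-style input shape
# [batch, points, 3, 1] inferred from those kernel sizes:
#
#   points_pl = tf.placeholder(tf.float32,
#                              [cfg.batch_size, cfg.num_points, 3, 1])
#   is_training_pl = tf.placeholder(tf.bool, shape=())
#   scores = build_graph_old(points_pl, is_training_pl, keep_prob=0.7)
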
def build_graph(input_pl, is_training, weight_decay=0.0, keep_prob=1.0,
                bn_decay=None):
    print("\nNetwork Input: ", input_pl)
    net = tf.image.resize_images(input_pl, [cfg.im_dim, cfg.im_dim])
    net = tf_ops.conv2d(net, 32, [5, 5],
                        padding='VALID', stride=[1, 1],
                        bn=True, is_training=is_training,
                        scope='conv1', bn_decay=bn_decay)
    net = tf.nn.relu(net)
    net = do_bn(net, is_training)
    net = tf_ops.conv2d(net, 48, [5, 5],
                        padding='VALID', stride=[1, 1],
                        bn=True, is_training=is_training,
                        scope='conv1_1', bn_decay=bn_decay)
    net = tf.nn.relu(net)
    net = tf_ops.max_pool2d(net, [3, 3], padding='VALID', scope='maxpool_1')
    # Stacked inception blocks, each followed by batch normalization.
    net = inception2d(net, 64, is_training, bn_decay, name='incp_1')
    net = do_bn(net, is_training)
    net = inception2d(net, 64, is_training, bn_decay, name='incp_2')
    net = do_bn(net, is_training)
    net = inception2d(net, 64, is_training, bn_decay, name='incp_3')
    net = do_bn(net, is_training)
    net = tf_ops.max_pool2d(net, [3, 3], padding='VALID', scope='maxpool_2')
    net = inception2d(net, 96, is_training, bn_decay, name='incp_4')
    net = do_bn(net, is_training)
    net = inception2d(net, 96, is_training, bn_decay, name='incp_5')
    net = do_bn(net, is_training)
    net = inception2d(net, 128, is_training, bn_decay, name='incp_6')
    net = do_bn(net, is_training)
    net = tf_ops.max_pool2d(net, [3, 3], padding='VALID', scope='maxpool_3')
    print("\nfinal conv shape: ", net.shape)
    net = tf.contrib.layers.flatten(net)
    print("\nshape after flattening: ", net.shape)
    net = tf_ops.fully_connected(net, 1024, bn=True, is_training=is_training,
                                 scope='fc1', bn_decay=bn_decay)
    net = tf.nn.relu(net)
    net = do_bn(net, is_training)
    net = tf_ops.dropout(net, keep_prob=keep_prob,
                         is_training=is_training, scope='dp1')
    net = tf_ops.fully_connected(net, 512, bn=True, is_training=is_training,
                                 scope='fc2', bn_decay=bn_decay)
    net = tf.nn.relu(net)
    net = do_bn(net, is_training)
    net = tf_ops.dropout(net, keep_prob=keep_prob,
                         is_training=is_training, scope='dp2')
    net = tf_ops.fully_connected(net, cfg.num_classes, scope='fc3')
    # net = tf.nn.sigmoid(net, name="output_node")
    net = tf.nn.softmax(net, name="output_node")
    print("\nShape of logits: ", net.shape)
    return net
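
# `inception2d` is used by build_graph above but not defined in this section.
# A minimal GoogLeNet-style sketch under a different name so it cannot shadow
# the real block; the branch layout and widths are assumptions, and only the
# (net, filters, is_training, bn_decay, name) call shape comes from above.
def inception2d_sketch(net, filters, is_training, bn_decay, name):
    with tf.variable_scope(name):
        # Parallel 'SAME'-padded branches keep spatial dims aligned so the
        # outputs can be concatenated along the channel axis.
        b1 = tf_ops.conv2d(net, filters, [1, 1], padding='SAME',
                           stride=[1, 1], bn=True, is_training=is_training,
                           scope='branch_1x1', bn_decay=bn_decay)
        b3 = tf_ops.conv2d(net, filters, [3, 3], padding='SAME',
                           stride=[1, 1], bn=True, is_training=is_training,
                           scope='branch_3x3', bn_decay=bn_decay)
        b5 = tf_ops.conv2d(net, filters, [5, 5], padding='SAME',
                           stride=[1, 1], bn=True, is_training=is_training,
                           scope='branch_5x5', bn_decay=bn_decay)
        pool = tf.nn.max_pool(net, ksize=[1, 3, 3, 1], strides=[1, 1, 1, 1],
                              padding='SAME', name='branch_pool')
        return tf.nn.relu(tf.concat([b1, b3, b5, pool], axis=-1))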