import tensorflow as tf

import tf_ops  # project-local layer wrappers (conv2d, max_pool2d, fully_connected, dropout)
import cfg     # project config (im_dim, num_points, num_classes, batch_size); import path assumed


def build_graph_multi(input_pl, is_training, weight_decay=0.0, keep_prob=1.0,
                      bn_decay=None):
    """Plain CNN classifier split across two GPUs.

    (`weight_decay` is accepted but unused here.)
    """
    print("\nNetwork input: ", input_pl)
    with tf.device('/device:GPU:1'):
        net = tf.image.resize_images(input_pl, [cfg.im_dim, cfg.im_dim])
        net = tf_ops.conv2d(net, 32, [5, 5], padding='VALID', stride=[1, 1],
                            bn=True, is_training=is_training,
                            scope='conv1', bn_decay=bn_decay)
        net = tf.nn.relu(net)
        net = do_bn(net, is_training)
        net = tf_ops.conv2d(net, 32, [5, 5], padding='VALID', stride=[1, 1],
                            bn=True, is_training=is_training,
                            scope='conv1_1', bn_decay=bn_decay)
        net = tf.nn.relu(net)
        net = do_bn(net, is_training)
        net = tf_ops.conv2d(net, 48, [5, 5], padding='VALID', stride=[1, 1],
                            bn=True, is_training=is_training,
                            scope='conv2', bn_decay=bn_decay)
        net = tf.nn.relu(net)
        net = do_bn(net, is_training)
        net = tf_ops.conv2d(net, 48, [3, 3], padding='VALID', stride=[1, 1],
                            bn=True, is_training=is_training,
                            scope='conv2_2', bn_decay=bn_decay)
        # NOTE: no ReLU after conv2_2 in the original.
        net = do_bn(net, is_training)
        net = tf_ops.max_pool2d(net, [3, 3], padding='VALID', scope='maxpool')
        net = tf_ops.conv2d(net, 64, [3, 3], padding='VALID', stride=[1, 1],
                            bn=True, is_training=is_training,
                            scope='conv3', bn_decay=bn_decay)
        net = tf.nn.relu(net)
        net = do_bn(net, is_training)
        net = tf_ops.conv2d(net, 64, [3, 3], padding='VALID', stride=[1, 1],
                            bn=True, is_training=is_training,
                            scope='conv4', bn_decay=bn_decay)
        net = tf.nn.relu(net)
        net = do_bn(net, is_training)

    with tf.device('/device:GPU:2'):
        net = tf_ops.max_pool2d(net, [3, 3], padding='VALID', scope='maxpool')
        net = tf_ops.conv2d(net, 128, [3, 3], padding='VALID', stride=[1, 1],
                            bn=True, is_training=is_training,
                            scope='conv5', bn_decay=bn_decay)
        net = tf.nn.relu(net)
        net = do_bn(net, is_training)
        net = tf_ops.conv2d(net, 128, [3, 3], padding='VALID', stride=[1, 1],
                            bn=True, is_training=is_training,
                            scope='conv6', bn_decay=bn_decay)
        print("\nfinal conv shape: ", net.shape)
        net = tf.nn.relu(net)
        net = do_bn(net, is_training)

        # Symmetric function: max pooling
        # net = tf_ops.max_pool2d(net, [3, 3], padding='VALID', scope='maxpool')
        # net = tf.reshape(net, [cfg.batch_size, -1])
        net = tf.contrib.layers.flatten(net)
        print("\nshape after flattening: ", net.shape)
        net = tf_ops.fully_connected(net, 1024, bn=True, is_training=is_training,
                                     scope='fc1', bn_decay=bn_decay)
        net = tf.nn.relu(net)
        net = do_bn(net, is_training)
        net = tf_ops.dropout(net, keep_prob=keep_prob, is_training=is_training,
                             scope='dp1')
        net = tf_ops.fully_connected(net, 512, bn=True, is_training=is_training,
                                     scope='fc2', bn_decay=bn_decay)
        net = tf.nn.relu(net)
        net = do_bn(net, is_training)
        net = tf_ops.dropout(net, keep_prob=keep_prob, is_training=is_training,
                             scope='dp2')
        net = tf_ops.fully_connected(net, cfg.num_classes, scope='fc3')
        # net = tf.nn.sigmoid(net, name="output_node")
        net = tf.nn.softmax(net, name="output_node")
        print("\nShape of output: ", net.shape)
        return net
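
# `do_bn` is called throughout but not defined in this section. A minimal
# sketch of a compatible helper, assuming it wraps TF 1.x batch normalization
# (hypothetical -- note the tf_ops.conv2d calls above already request BN via
# bn=True, so this adds a second normalization after the activation):
def do_bn(inputs, is_training):
    # Use batch statistics while training and moving averages at inference,
    # switched by the boolean `is_training` tensor.
    return tf.layers.batch_normalization(inputs, training=is_training)
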
def build_graph_old(input_pl, is_training, weight_decay=0.0, keep_prob=1.0,
                    bn_decay=None):
    """PointNet-style classifier: per-point convolutions followed by a
    symmetric max pool over all points."""
    print("\nNetwork input: ", input_pl)
    pool_num = int(cfg.num_points / 3)  # only used by the commented-out skip pool
    net = tf_ops.conv2d(input_pl, 64, [1, 3], padding='VALID', stride=[1, 1],
                        bn=True, is_training=is_training,
                        scope='conv1', bn_decay=bn_decay)
    print("\nshape after input: ", net.shape)
    net = tf.nn.relu(net)
    # skip_pool = tf_ops.max_pool2d(net, [pool_num, 1],
    #                               padding='VALID', scope='maxpool')
    net = tf_ops.conv2d(net, 64, [1, 1], padding='VALID', stride=[1, 1],
                        bn=True, is_training=is_training,
                        scope='conv2', bn_decay=bn_decay)
    net = tf.nn.relu(net)
    net = tf_ops.conv2d(net, 64, [1, 1], padding='VALID', stride=[1, 1],
                        bn=True, is_training=is_training,
                        scope='conv3', bn_decay=bn_decay)
    net = tf.nn.relu(net)
    net = tf_ops.conv2d(net, 128, [1, 1], padding='VALID', stride=[1, 1],
                        bn=True, is_training=is_training,
                        scope='conv4', bn_decay=bn_decay)
    net = tf.nn.relu(net)
    net = tf_ops.conv2d(net, 1024, [1, 1], padding='VALID', stride=[1, 1],
                        bn=True, is_training=is_training,
                        scope='conv5', bn_decay=bn_decay)
    net = tf.nn.relu(net)
    print("\nshape before max pool: ", net.shape)

    # Symmetric function: max pooling over all points
    # net = tf_ops.max_pool2d(net, [pool_num, 1],
    net = tf_ops.max_pool2d(net, [cfg.num_points, 1],
                            padding='VALID', scope='maxpool')
    # skip_multiply = tf.multiply(net, skip_pool)
    # net = tf.add(net, skip_multiply)
    print("\nshape after max pool: ", net.shape)
    net = tf.contrib.layers.flatten(net)
    print("\nshape after flattening: ", net.shape)
    net = tf_ops.fully_connected(net, 512, bn=True, is_training=is_training,
                                 scope='fc1', bn_decay=bn_decay)
    net = tf.nn.relu(net)
    net = tf_ops.dropout(net, keep_prob=keep_prob, is_training=is_training,
                         scope='dp1')
    net = tf_ops.fully_connected(net, 128, bn=True, is_training=is_training,
                                 scope='fc2', bn_decay=bn_decay)
    net = tf.nn.relu(net)
    net = tf_ops.dropout(net, keep_prob=keep_prob, is_training=is_training,
                         scope='dp2')
    net = tf_ops.fully_connected(net, cfg.num_classes, scope='fc3')
    # NOTE: ReLU on the logits zeroes all negative class scores; softmax
    # (as in build_graph below) is the more conventional output here.
    net = tf.nn.relu(net, name="output_node")
    print("\nShape of logits: ", net.shape)
    return net
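
# Hypothetical wiring for build_graph_old, assuming a point-cloud placeholder
# of shape (batch, num_points, 3, 1) so the [1, 3] kernel of conv1 spans the
# xyz coordinates of each point (`points_batch` stands in for a NumPy array
# matching the placeholder shape):
#
#     pts_pl = tf.placeholder(tf.float32,
#                             shape=(cfg.batch_size, cfg.num_points, 3, 1))
#     is_training_pl = tf.placeholder(tf.bool, shape=())
#     scores = build_graph_old(pts_pl, is_training_pl, keep_prob=0.7)
#     with tf.Session() as sess:
#         sess.run(tf.global_variables_initializer())
#         out = sess.run(scores, feed_dict={pts_pl: points_batch,
#                                           is_training_pl: False})
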
def build_graph(input_pl, is_training, weight_decay=0.0, keep_prob=1.0,
                bn_decay=None):
    """Inception-style image classifier: two plain conv layers, six
    inception2d blocks with interleaved max pools, then three FC layers."""
    print("\nNetwork input: ", input_pl)
    net = tf.image.resize_images(input_pl, [cfg.im_dim, cfg.im_dim])
    net = tf_ops.conv2d(net, 32, [5, 5], padding='VALID', stride=[1, 1],
                        bn=True, is_training=is_training,
                        scope='conv1', bn_decay=bn_decay)
    net = tf.nn.relu(net)
    net = do_bn(net, is_training)
    net = tf_ops.conv2d(net, 48, [5, 5], padding='VALID', stride=[1, 1],
                        bn=True, is_training=is_training,
                        scope='conv1_1', bn_decay=bn_decay)
    net = tf.nn.relu(net)
    net = tf_ops.max_pool2d(net, [3, 3], padding='VALID', scope='maxpool_1')

    net = inception2d(net, 64, is_training, bn_decay, name='incp_1')
    net = do_bn(net, is_training)
    net = inception2d(net, 64, is_training, bn_decay, name='incp_2')
    net = do_bn(net, is_training)
    net = inception2d(net, 64, is_training, bn_decay, name='incp_3')
    net = do_bn(net, is_training)
    net = tf_ops.max_pool2d(net, [3, 3], padding='VALID', scope='maxpool_2')

    net = inception2d(net, 96, is_training, bn_decay, name='incp_4')
    net = do_bn(net, is_training)
    net = inception2d(net, 96, is_training, bn_decay, name='incp_5')
    net = do_bn(net, is_training)
    net = inception2d(net, 128, is_training, bn_decay, name='incp_6')
    net = do_bn(net, is_training)
    net = tf_ops.max_pool2d(net, [3, 3], padding='VALID', scope='maxpool_3')
    print("\nfinal conv shape: ", net.shape)

    net = tf.contrib.layers.flatten(net)
    print("\nshape after flattening: ", net.shape)
    net = tf_ops.fully_connected(net, 1024, bn=True, is_training=is_training,
                                 scope='fc1', bn_decay=bn_decay)
    net = tf.nn.relu(net)
    net = do_bn(net, is_training)
    net = tf_ops.dropout(net, keep_prob=keep_prob, is_training=is_training,
                         scope='dp1')
    net = tf_ops.fully_connected(net, 512, bn=True, is_training=is_training,
                                 scope='fc2', bn_decay=bn_decay)
    net = tf.nn.relu(net)
    net = do_bn(net, is_training)
    net = tf_ops.dropout(net, keep_prob=keep_prob, is_training=is_training,
                         scope='dp2')
    net = tf_ops.fully_connected(net, cfg.num_classes, scope='fc3')
    # net = tf.nn.sigmoid(net, name="output_node")
    net = tf.nn.softmax(net, name="output_node")
    print("\nShape of output: ", net.shape)
    return net
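
# `inception2d` is used by build_graph but not defined in this section. A
# minimal sketch of a GoogLeNet-style block, assuming parallel 1x1 / 3x3 / 5x5
# convolutions plus a pooled branch concatenated on the channel axis. This is
# an illustration, not the project's actual helper: it uses plain tf.layers
# instead of tf_ops and ignores `is_training`/`bn_decay` (the real helper
# presumably threads them into its batch-norm layers).
def inception2d_sketch(inputs, filters, is_training, bn_decay, name):
    with tf.variable_scope(name):
        b1 = tf.layers.conv2d(inputs, filters, 1, padding='same',
                              activation=tf.nn.relu, name='conv_1x1')
        b2 = tf.layers.conv2d(inputs, filters, 3, padding='same',
                              activation=tf.nn.relu, name='conv_3x3')
        b3 = tf.layers.conv2d(inputs, filters, 5, padding='same',
                              activation=tf.nn.relu, name='conv_5x5')
        # Stride-1 SAME pooling keeps the spatial size so the branches align.
        b4 = tf.layers.max_pooling2d(inputs, 3, 1, padding='same', name='pool')
        return tf.concat([b1, b2, b3, b4], axis=-1)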