def getModel(x, num_output, K, stages, wd, is_training, transfer_mode=False):
    """Build a three-dense-block DenseNet-style classifier.

    Args:
        x: input image tensor.
        num_output: size of the final fully-connected (logits) layer.
        K: growth rate forwarded to `block` and `transition`.
        stages: sequence of 3 ints — number of layers in each dense block.
        wd: weight-decay coefficient forwarded to every layer.
        is_training: bool/placeholder controlling batch-norm behavior.
        transfer_mode: when True the classifier is created under the
            'transfer_output' scope instead of 'output', so it can be
            re-initialized independently when fine-tuning.

    Returns:
        Logits tensor of shape (batch, num_output).
    """
    with tf.variable_scope('conv1'):
        x = common.spatialConvolution(x, 3, 1, 2 * K, wd=wd)
    print(x)  # shape trace for debugging; print() works on Python 2 and 3
    with tf.variable_scope('block0'):
        x = block(x, stages[0], K, is_training=is_training, wd=wd)
    print(x)
    with tf.variable_scope('trans1'):
        x = transition(x, K, wd=wd, is_training=is_training)
    print(x)
    with tf.variable_scope('block2'):
        x = block(x, stages[1], K, is_training=is_training, wd=wd)
    print(x)
    with tf.variable_scope('trans2'):
        x = transition(x, K, wd=wd, is_training=is_training)
    print(x)
    with tf.variable_scope('block3'):
        x = block(x, stages[2], K, is_training=is_training, wd=wd)
    print(x)
    x = common.avgPool(x, 8, 1, padding='VALID')
    x = common.flatten(x)
    # A distinct scope name in transfer mode lets the new classifier be
    # excluded when restoring a pre-trained checkpoint.
    classifier_scope = 'transfer_output' if transfer_mode else 'output'
    with tf.variable_scope(classifier_scope):
        x = common.fullyConnected(x, num_output, wd=wd)
    return x
def inference(x, num_output, wd, dropout_rate, is_training, transfer_mode=False):
    """AlexNet-style classifier: five conv stages, two FC stages, one logits layer.

    In transfer mode the logits layer lives under 'transfer_output' so it can
    be re-initialized independently of a restored checkpoint.
    Returns the logits tensor of shape (batch, num_output).
    """

    def conv_bn_relu(inp, ksize, stride, depth):
        # conv -> batch-norm -> ReLU; variables land in the caller's scope.
        out = common.spatialConvolution(inp, ksize, stride, depth, wd=wd)
        out = common.batchNormalization(out, is_training=is_training)
        return tf.nn.relu(out)

    with tf.variable_scope('conv1'):
        net = conv_bn_relu(x, 11, 4, 64)
        net = common.maxPool(net, 3, 2)
    with tf.variable_scope('conv2'):
        net = conv_bn_relu(net, 5, 1, 192)
        net = common.maxPool(net, 3, 2)
    with tf.variable_scope('conv3'):
        net = conv_bn_relu(net, 3, 1, 384)
    with tf.variable_scope('conv4'):
        net = conv_bn_relu(net, 3, 1, 256)
    with tf.variable_scope('conv5'):
        net = conv_bn_relu(net, 3, 1, 256)
        net = common.maxPool(net, 3, 2)
    net = common.flatten(net)
    # Two identical dropout -> FC(4096) -> BN -> ReLU stages.
    for fc_scope in ('fc1', 'fc2'):
        with tf.variable_scope(fc_scope):
            net = tf.nn.dropout(net, dropout_rate)
            net = common.fullyConnected(net, 4096, wd=wd)
            net = common.batchNormalization(net, is_training=is_training)
            net = tf.nn.relu(net)
    logits_scope = 'transfer_output' if transfer_mode else 'output'
    with tf.variable_scope(logits_scope):
        net = common.fullyConnected(net, num_output, wd=wd)
    return net
def inference(x, num_output, wd, dropout_rate, is_training, transfer_mode=False):
    """AlexNet-style multi-head network: shared trunk, one FC head per task.

    Args:
        x: input image tensor.
        num_output: sequence of ints — one logits head of that size is
            created per entry, under scopes 'output0', 'output1', ...
        wd: weight-decay coefficient forwarded to every layer.
        dropout_rate: keep probability passed to tf.nn.dropout.
        is_training: bool/placeholder controlling batch-norm behavior.
        transfer_mode: accepted for signature parity with the other
            inference() variants, but unused here — all heads are always
            created under the 'outputN' scopes.

    Returns:
        List of logits tensors, one per entry of num_output.
    """
    # NOTE: the original also created an unused conv weight initializer
    # (stddev=0.1); it was dead code and has been removed.
    fc_weight_initializer = tf.truncated_normal_initializer(stddev=0.01)
    with tf.variable_scope('conv1'):
        network = common.spatialConvolution(x, 11, 4, 64, wd=wd)
        network = common.batchNormalization(network, is_training=is_training)
        network = tf.nn.relu(network)
        network = common.maxPool(network, 3, 2)
    with tf.variable_scope('conv2'):
        network = common.spatialConvolution(network, 5, 1, 192, wd=wd)
        network = common.batchNormalization(network, is_training=is_training)
        network = tf.nn.relu(network)
        network = common.maxPool(network, 3, 2)
    with tf.variable_scope('conv3'):
        network = common.spatialConvolution(network, 3, 1, 384, wd=wd)
        network = common.batchNormalization(network, is_training=is_training)
        network = tf.nn.relu(network)
    with tf.variable_scope('conv4'):
        network = common.spatialConvolution(network, 3, 1, 256, wd=wd)
        network = common.batchNormalization(network, is_training=is_training)
        network = tf.nn.relu(network)
    with tf.variable_scope('conv5'):
        network = common.spatialConvolution(network, 3, 1, 256, wd=wd)
        network = common.batchNormalization(network, is_training=is_training)
        network = tf.nn.relu(network)
        network = common.maxPool(network, 3, 2)
    network = common.flatten(network)
    with tf.variable_scope('fc1'):
        network = tf.nn.dropout(network, dropout_rate)
        network = common.fullyConnected(network, 4096, wd=wd)
        network = common.batchNormalization(network, is_training=is_training)
        network = tf.nn.relu(network)
    with tf.variable_scope('fc2'):
        network = tf.nn.dropout(network, dropout_rate)
        network = common.fullyConnected(network, 4096, wd=wd)
        network = common.batchNormalization(network, is_training=is_training)
        network = tf.nn.relu(network)
    output = [None] * len(num_output)
    # range (not the Python-2-only xrange) so this also runs under Python 3.
    for o in range(len(num_output)):
        with tf.variable_scope('output' + str(o)):
            output[o] = common.fullyConnected(
                network, num_output[o],
                weight_initializer=fc_weight_initializer,
                bias_initializer=tf.zeros_initializer,
                wd=wd)
    return output
def inference(x, num_output, wd, dropout_rate, is_training, transfer_mode=False, model_type='A'):
    """Build a VGG network (configurations A, B, D or E).

    Args:
        x: input image tensor.
        num_output: size of the final fully-connected (logits) layer.
        wd: weight-decay coefficient forwarded to every layer.
        dropout_rate: keep probability passed to tf.nn.dropout.
        is_training: bool/placeholder controlling batch-norm behavior.
        transfer_mode: when True the classifier is created under the
            'transfer_output' scope instead of 'output'.
        model_type: one of 'A', 'B', 'D', 'E' selecting the VGG variant.

    Returns:
        Logits tensor of shape (batch, num_output).

    Raises:
        ValueError: if model_type is not one of 'A', 'B', 'D', 'E'.
    """
    # Tables describing VGG configurations; 'M' denotes a 2x2 max-pool.
    configs = {
        'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
        'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M',
              512, 512, 'M'],
        'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M',
              512, 512, 512, 'M', 512, 512, 512, 'M'],
        'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M',
              512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
    }
    if model_type not in configs:
        # The original only printed a warning here and then crashed later
        # with a NameError on the undefined config; fail fast instead.
        raise ValueError('Unknown model type: ' + model_type +
                         ' | Please specify a modelType A or B or D or E')
    config = configs[model_type]
    network = x
    for k, v in enumerate(config):
        if v == 'M':
            network = common.maxPool(network, 2, 2)
        else:
            with tf.variable_scope('conv' + str(k)):
                network = common.spatialConvolution(network, 3, 1, v, wd=wd)
                network = tf.nn.relu(network)
    network = common.flatten(network)
    with tf.variable_scope('fc1'):
        network = common.fullyConnected(network, 4096, wd=wd)
        network = tf.nn.relu(network)
        network = common.batchNormalization(network, is_training=is_training)
        network = tf.nn.dropout(network, dropout_rate)
    with tf.variable_scope('fc2'):
        network = common.fullyConnected(network, 4096, wd=wd)
        network = tf.nn.relu(network)
        network = common.batchNormalization(network, is_training=is_training)
        network = tf.nn.dropout(network, dropout_rate)
    classifier_scope = 'transfer_output' if transfer_mode else 'output'
    with tf.variable_scope(classifier_scope):
        network = common.fullyConnected(network, num_output, wd=wd)
    return network
def inference(x, num_output, wd, is_training, transfer_mode=False):
    """Four-block convolutional classifier with a single logits head.

    In transfer mode the logits layer lives under 'transfer_output' so it
    can be re-initialized independently of a restored checkpoint.
    Returns the logits tensor of shape (batch, num_output).
    """
    # (scope, [kernel, stride, depth], max-pool after block?)
    block_specs = [
        ('block1', [11, 4, 96], True),
        ('block2', [5, 1, 256], True),
        ('block3', [3, 1, 384], True),
        ('block4', [3, 1, 1024], False),
    ]
    net = x
    for scope, params, pool_after in block_specs:
        with tf.variable_scope(scope):
            net = block(net, params, wd, is_training)
        if pool_after:
            net = common.maxPool(net, 3, 2)
    net = common.avgPool(net, 7, 1)
    net = common.flatten(net)
    logits_scope = 'transfer_output' if transfer_mode else 'output'
    with tf.variable_scope(logits_scope):
        net = common.fullyConnected(net, num_output, wd=wd)
    return net
def inference(x, num_output, wd, is_training, transfer_mode=False):
    """Four-block convolutional network with one FC head per task.

    Args:
        x: input image tensor.
        num_output: sequence of ints — one logits head of that size is
            created per entry, under scopes 'output0', 'output1', ...
        wd: weight-decay coefficient forwarded to every layer.
        is_training: bool/placeholder controlling batch-norm behavior.
        transfer_mode: accepted for signature parity with the other
            inference() variants, but unused here.

    Returns:
        List of logits tensors, one per entry of num_output.
    """
    with tf.variable_scope('block1'):
        network = block(x, [11, 4, 96], wd, is_training)
        network = common.maxPool(network, 3, 2)
    with tf.variable_scope('block2'):
        network = block(network, [5, 1, 256], wd, is_training)
        network = common.maxPool(network, 3, 2)
    with tf.variable_scope('block3'):
        network = block(network, [3, 1, 384], wd, is_training)
        network = common.maxPool(network, 3, 2)
    with tf.variable_scope('block4'):
        network = block(network, [3, 1, 1024], wd, is_training)
    network = common.avgPool(network, 7, 1)
    network = common.flatten(network)
    output = [None] * len(num_output)
    # range (not the Python-2-only xrange) so this also runs under Python 3.
    for o in range(len(num_output)):
        with tf.variable_scope('output' + str(o)):
            output[o] = common.fullyConnected(network, num_output[o], wd=wd)
    return output
def getModel(x, num_output, wd, is_training,
             num_blocks=[3, 4, 6, 3],  # defaults to 50-layer network
             bottleneck=True, transfer_mode=False):
    """Build a ResNet with one FC head per task.

    Args:
        x: input image tensor.
        num_output: sequence of ints — one logits head of that size is
            created per entry, under scopes 'output0', 'output1', ...
        wd: weight-decay coefficient forwarded to every layer.
        is_training: bool/placeholder controlling batch-norm behavior.
        num_blocks: residual-block counts for scales 2-5; the default
            [3, 4, 6, 3] with bottleneck=True is ResNet-50.
        bottleneck: use bottleneck residual units when True.
        transfer_mode: accepted for signature parity with the other model
            builders, but unused here.

    Returns:
        List of logits tensors, one per entry of num_output.
    """
    conv_weight_initializer = tf.truncated_normal_initializer(stddev=0.1)
    fc_weight_initializer = tf.truncated_normal_initializer(stddev=0.01)
    with tf.variable_scope('scale1'):
        x = common.spatialConvolution(
            x, 7, 2, 64, weight_initializer=conv_weight_initializer, wd=wd)
        x = common.batchNormalization(x, is_training=is_training)
        x = tf.nn.relu(x)
    with tf.variable_scope('scale2'):
        x = common.maxPool(x, 3, 2)
        x = common.resnetStack(x, num_blocks[0], 1, 64, bottleneck,
                               wd=wd, is_training=is_training)
    with tf.variable_scope('scale3'):
        x = common.resnetStack(x, num_blocks[1], 2, 128, bottleneck,
                               wd=wd, is_training=is_training)
    with tf.variable_scope('scale4'):
        x = common.resnetStack(x, num_blocks[2], 2, 256, bottleneck,
                               wd=wd, is_training=is_training)
    with tf.variable_scope('scale5'):
        x = common.resnetStack(x, num_blocks[3], 2, 512, bottleneck,
                               wd=wd, is_training=is_training)
    # post-net: global average pool over the spatial dimensions.
    x = tf.reduce_mean(x, reduction_indices=[1, 2], name="avg_pool")
    output = [None] * len(num_output)
    # range (not the Python-2-only xrange) so this also runs under Python 3.
    for o in range(len(num_output)):
        with tf.variable_scope('output' + str(o)):
            output[o] = common.fullyConnected(
                x, num_output[o],
                weight_initializer=fc_weight_initializer,
                bias_initializer=tf.zeros_initializer,
                wd=wd)
    return output
def inference(x, num_output, wd, dropout_rate, is_training, transfer_mode=False):
    """GoogLeNet-style network with a main branch and an auxiliary classifier.

    Returns the two branches' logits concatenated along axis 1, i.e. a
    tensor of shape (batch, 2 * num_output). In transfer mode each branch's
    classifier is created under 'transfer_output' instead of 'output'.
    """
    logits_scope = 'transfer_output' if transfer_mode else 'output'

    with tf.variable_scope('features'):
        # Stem: conv1 -> pool, conv2 (1x1), conv3 -> pool.
        with tf.variable_scope('conv1'):
            net = common.spatialConvolution(x, 7, 2, 64, wd=wd)
            net = common.batchNormalization(net, is_training=is_training)
            net = tf.nn.relu(net)
            net = common.maxPool(net, 3, 2)
        with tf.variable_scope('conv2'):
            net = common.spatialConvolution(net, 1, 1, 64, wd=wd)
            net = common.batchNormalization(net, is_training=is_training)
            net = tf.nn.relu(net)
        with tf.variable_scope('conv3'):
            net = common.spatialConvolution(net, 3, 1, 192, wd=wd)
            net = common.batchNormalization(net, is_training=is_training)
            net = tf.nn.relu(net)
            net = common.maxPool(net, 3, 2)
        with tf.variable_scope('inception3a'):
            net = inception(net, 64, [96, 128], [16, 32], 32,
                            wd=wd, is_training=is_training)
        with tf.variable_scope('inception3b'):
            net = inception(net, 128, [128, 192], [32, 96], 64,
                            wd=wd, is_training=is_training)
        net = common.maxPool(net, 3, 2)
        with tf.variable_scope('inception4a'):
            net = inception(net, 192, [96, 208], [16, 48], 64,
                            wd=wd, is_training=is_training)
        with tf.variable_scope('inception4b'):
            net = inception(net, 160, [112, 224], [24, 64], 64,
                            wd=wd, is_training=is_training)
        with tf.variable_scope('inception4c'):
            net = inception(net, 128, [128, 256], [24, 64], 64,
                            wd=wd, is_training=is_training)
        with tf.variable_scope('inception4d'):
            net = inception(net, 112, [144, 288], [32, 64], 64,
                            wd=wd, is_training=is_training)

    # Main branch: inception 4e/5a/5b, global pooling, dropout, classifier.
    with tf.variable_scope('mainb'):
        with tf.variable_scope('inception4e'):
            main_branch = inception(net, 256, [160, 320], [32, 128], 128,
                                    wd=wd, is_training=is_training)
            main_branch = common.maxPool(main_branch, 3, 2)
        with tf.variable_scope('inception5a'):
            main_branch = inception(main_branch, 256, [160, 320], [32, 128],
                                    128, wd=wd, is_training=is_training)
        with tf.variable_scope('inception5b'):
            main_branch = inception(main_branch, 384, [192, 384], [48, 128],
                                    128, wd=wd, is_training=is_training)
        main_branch = common.avgPool(main_branch, 7, 1)
        main_branch = common.flatten(main_branch)
        main_branch = tf.nn.dropout(main_branch, dropout_rate)
        with tf.variable_scope(logits_scope):
            main_branch = common.fullyConnected(main_branch, num_output, wd=wd)

    # Auxiliary branch: branches off after inception4d.
    with tf.variable_scope('auxb'):
        aux_classifier = common.avgPool(net, 5, 3)
        with tf.variable_scope('conv1'):
            aux_classifier = common.spatialConvolution(
                aux_classifier, 1, 1, 128, wd=wd)
            aux_classifier = common.batchNormalization(
                aux_classifier, is_training=is_training)
            aux_classifier = tf.nn.relu(aux_classifier)
        aux_classifier = common.flatten(aux_classifier)
        with tf.variable_scope('fc1'):
            aux_classifier = common.fullyConnected(aux_classifier, 1024, wd=wd)
            aux_classifier = tf.nn.dropout(aux_classifier, dropout_rate)
        with tf.variable_scope(logits_scope):
            aux_classifier = common.fullyConnected(
                aux_classifier, num_output, wd=wd)

    return tf.concat([main_branch, aux_classifier], 1)
def getModel(x, num_output, K, stages, dropout_rate, wd, is_training, transfer_mode=False):
    """Build a four-dense-block DenseNet with 8 hard-coded output heads.

    Heads: 'output0' has 48 units, 'output1' has 12 units, and
    'output2'..'output7' have 2 units each.

    Args:
        x: input image tensor.
        num_output: accepted for signature parity with the other model
            builders, but IGNORED — the head sizes above are hard-coded.
        K: growth rate forwarded to `block` and `transition`.
        stages: sequence of 4 ints — number of layers in each dense block.
        dropout_rate: keep probability forwarded to block/transition.
        wd: weight-decay coefficient forwarded to every layer.
        is_training: bool/placeholder controlling batch-norm behavior.
        transfer_mode: accepted for signature parity, but unused here.

    Returns:
        List of 8 logits tensors.
    """
    print("input", x)
    with tf.variable_scope('conv1'):
        x = common.spatialConvolution(x, 7, 2, 2 * K, wd=wd)
        print("First Conv", x)
        x = common.batchNormalization(x, is_training=is_training)
        x = tf.nn.relu(x)
        x = common.maxPool(x, 3, 2)
        print("First Maxpool", x)
    with tf.variable_scope('block1'):
        x = block(x, stages[0], K, is_training=is_training,
                  dropout_rate=dropout_rate, wd=wd)
        print("block1", x)
    with tf.variable_scope('trans1'):
        x = transition(x, K, dropout_rate=dropout_rate, wd=wd,
                       is_training=is_training)
        print("transition1", x)
    with tf.variable_scope('block2'):
        x = block(x, stages[1], K, is_training=is_training,
                  dropout_rate=dropout_rate, wd=wd)
        print("block2", x)
    with tf.variable_scope('trans2'):
        x = transition(x, K, dropout_rate=dropout_rate, wd=wd,
                       is_training=is_training)
        print("transition2", x)
    with tf.variable_scope('block3'):
        x = block(x, stages[2], K, is_training=is_training,
                  dropout_rate=dropout_rate, wd=wd)
        print("block3", x)
    with tf.variable_scope('trans3'):
        x = transition(x, K, dropout_rate=dropout_rate, wd=wd,
                       is_training=is_training)
        print("transition3", x)
    with tf.variable_scope('block4'):
        x = block(x, stages[3], K, is_training=is_training,
                  dropout_rate=dropout_rate, wd=wd)
        print("block4", x)
    x = common.avgPool(x, 7, 1, padding='VALID')
    print("Last Avg Pool", x)
    x = common.flatten(x)
    print("flatten", x)
    output = [None] * 8
    with tf.variable_scope('output0'):
        output[0] = common.fullyConnected(x, 48, wd=wd)
    with tf.variable_scope('output1'):
        output[1] = common.fullyConnected(x, 12, wd=wd)
    # range (not the Python-2-only xrange) so this also runs under Python 3.
    for o in range(2, 8):
        with tf.variable_scope('output' + str(o)):
            output[o] = common.fullyConnected(x, 2, wd=wd)
    return output