def model(is_training, reuse, dropout_keep_prob=0.5):
    common_args = common_layer_args(is_training, reuse)
    conv_args = make_args(batch_norm=True, activation=prelu,
                          w_init=initz.he_normal(scale=1), untie_biases=False,
                          **common_args)
    pool_args = make_args(padding='SAME', **common_args)
    inputs = input((None, crop_size[1], crop_size[0], 3), **common_args)
    with tf.variable_scope('squeezenet', values=[inputs]):
        net = conv2d(inputs, 96, stride=(2, 2), name='conv1', **conv_args)
        net = max_pool(net, name='maxpool1', **pool_args)
        net = fire_module(net, 16, 64, name='fire2', **conv_args)
        net = fire_module(net, 16, 64, name='fire3', **conv_args)
        net = fire_module(net, 32, 128, name='fire4', **conv_args)
        net = max_pool(net, name='maxpool4', **pool_args)
        net = fire_module(net, 32, 128, name='fire5', **conv_args)
        net = fire_module(net, 48, 192, name='fire6', **conv_args)
        net = fire_module(net, 48, 192, name='fire7', **conv_args)
        net = fire_module(net, 64, 256, name='fire8', **conv_args)
        net = max_pool(net, name='maxpool8', **pool_args)
        net = fire_module(net, 64, 256, name='fire9', **conv_args)
        # Reversed avg and conv layers per 'Network in Network'
        net = dropout(net, drop_p=1 - dropout_keep_prob, name='dropout6',
                      **common_args)
        net = conv2d(net, 10, filter_size=(1, 1), name='conv10', **conv_args)
        logits = global_avg_pool(net, name='logits', **pool_args)
        predictions = softmax(logits, name='predictions', **common_args)
    return end_points(is_training)

def model(inputs, is_training, reuse, num_classes=2):
    common_args = common_layer_args(is_training, reuse)
    conv1 = conv2d(inputs, 32, name='conv1', activation=prelu, **common_args)
    conv1 = conv2d(conv1, 32, name='conv2', activation=prelu, **common_args)
    fc1 = fc(conv1, num_classes, name='logits', **common_args)
    prediction = softmax(fc1, name='prediction', **common_args)
    return end_points(is_training)

def _attention(query, attn_states, is_training, reuse, attn_size,
               attn_vec_size, attn_length, trainable=True, name='attention'):
    with tf.variable_scope(name, reuse=reuse):
        v = tf.get_variable(name="V", shape=[attn_vec_size],
                            trainable=trainable)
        attn_states_reshaped = tf.reshape(
            attn_states, shape=[-1, attn_length, 1, attn_size])
        attn_conv = conv2d(attn_states_reshaped, attn_vec_size, is_training,
                           reuse, filter_size=(1, 1), stride=(1, 1),
                           trainable=trainable, use_bias=False)
        y = _linear(query, attn_vec_size, reuse)
        y = tf.reshape(y, [-1, 1, 1, attn_vec_size])
        s = tf.reduce_sum(v * tf.tanh(attn_conv + y), [2, 3])
        a = softmax(s)
        d = tf.reduce_sum(
            tf.reshape(a, [-1, attn_length, 1, 1]) * attn_states_reshaped,
            [1, 2])
        new_attns = tf.reshape(d, [-1, attn_size])
        new_attn_states = tf.slice(attn_states, [0, 1, 0], [-1, -1, -1])
        return new_attns, new_attn_states

def model(is_training, reuse, num_classes=5, dropout_keep_prob=0.5):
    common_args = common_layer_args(is_training, reuse)
    conv_args = make_args(
        untie_biases=True, batch_norm=batch_norm, **common_args)
    logit_args = make_args(activation=prelu, **common_args)
    inputs = input((None, crop_size[1], crop_size[0], 3), **common_args)
    net = conv2d(inputs, 32, filter_size=(3, 3), stride=(2, 2), name='conv1',
                 **conv_args)
    net = conv2d(net, 64, name='conv2', **conv_args)
    net = bottleneck_v1(net, num_unit=128, name='block_v1_1', **conv_args)
    net = bottleneck_v1(net, num_unit=256, name='block_v1_2', **conv_args)
    net = bottleneck_v1(net, num_unit=728, name='block_v1_3', **conv_args)
    for i in range(8):
        prefix = 'block_v2_' + str(i + 5)
        net = bottleneck_v2(net, num_unit=728, name=prefix, **conv_args)
    net = bottleneck_v1(net, num_unit=1024, name='block_v1_4', **conv_args)
    net = separable_conv2d(net, 1536, filter_size=(3, 3), stride=(1, 1),
                           name='sconv1', **conv_args)
    net = separable_conv2d(net, 2048, filter_size=(3, 3), stride=(1, 1),
                           name='sconv2', **conv_args)
    with tf.variable_scope('Logits'):
        net = avg_pool_2d(net, net.get_shape()[1:3], name='AvgPool_1a')
        net = dropout(net, is_training, drop_p=1 - dropout_keep_prob,
                      name='Dropout_1b')
        logits = fully_connected(net, num_classes, name='logits', **logit_args)
        predictions = softmax(logits, name='predictions', **common_args)
    return end_points(is_training)

def model(x, y, batch_size, is_training=True, reuse=None):
    with tf.variable_scope('model', reuse=reuse):
        x_tensor = tf.reshape(x, [-1, 28, 28, 1])
        fc1 = fc(x, 20, is_training, reuse, name='fc1', activation=None)
        fc1 = tf.tanh(fc1)
        fc1 = dropout(fc1, is_training, drop_p=0.5)
        fc2 = fc(fc1, 6, is_training, reuse, use_bias=False, name='fc2')
        initial = np.array([[1., 0, 0], [0, 1., 0]])
        initial = initial.astype('float32')
        initial = initial.flatten()
        fc2_b = tf.Variable(initial_value=initial, name='fc2/b')
        fc2 = tf.nn.bias_add(fc2, bias=fc2_b)
        fc2 = tf.tanh(fc2)
        h_trans = spatialtransformer(x_tensor, fc2, batch_size=batch_size)
        conv1 = conv2d(h_trans, 16, is_training, reuse, activation=prelu,
                       name='conv1')
        conv2 = conv2d(conv1, 16, is_training, reuse, stride=(2, 2),
                       activation=prelu, name='conv2')
        fcmain = fc(conv2, 1024, is_training, reuse, name='fc',
                    activation=prelu)
        fcmain = dropout(fcmain, is_training, drop_p=0.5)
        logits = fc(fcmain, 10, is_training, reuse, name='logits',
                    activation=None)
        prediction = softmax(logits, 'prediction')
        loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y))
        opt = tf.train.AdamOptimizer()
        optimizer = opt.minimize(loss)
        # grads = opt.compute_gradients(loss, [fc2_b])
        correct_prediction = tf.equal(tf.argmax(prediction, 1),
                                      tf.argmax(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))
    return accuracy, loss, optimizer

def model(x, is_training, reuse):
    common_args = common_layer_args(is_training, reuse)
    fc_args = make_args(activation=relu, **common_args)
    logit_args = make_args(activation=None, **common_args)
    x = embedding(x, 10000, 128, reuse)
    x = lstm(x, 34, reuse, is_training)
    logits = fc(x, 2, name='logits', **logit_args)
    predictions = softmax(logits, name='predictions', **common_args)
    return end_points(is_training)

def model(inputs, is_training, reuse, num_classes=10, dropout_keep_prob=0.5):
    common_args = common_layer_args(is_training, reuse)
    conv_args = make_args(batch_norm=True, activation=prelu,
                          w_init=initz.he_normal(scale=1), untie_biases=False,
                          **common_args)
    conv_args_fm = make_args(w_init=initz.he_normal(scale=1),
                             untie_biases=False, **common_args)
    pool_args = make_args(padding='SAME', **common_args)
    with tf.variable_scope('squeezenet', values=[inputs]):
        net = separable_conv2d(inputs, 256, stride=(2, 2), name='conv1',
                               **conv_args)
        # net = conv2d(inputs, 96, stride=(2, 2), name='conv1', **conv_args)
        net = max_pool(net, name='maxpool1', **pool_args)
        net = fire_module(net, 16, 64, name='fire2', **conv_args_fm)
        net = bottleneck_simple(net, 16, 64, name='fire3', **conv_args_fm)
        net = batch_norm(net, activation_fn=tf.nn.relu, name='fire3_bn',
                         is_training=is_training, reuse=reuse)
        net = fire_module(net, 32, 128, name='fire4', **conv_args_fm)
        net = max_pool(net, name='maxpool4', **pool_args)
        net = bottleneck_simple(net, 32, 128, name='fire5', **conv_args_fm)
        net = batch_norm(net, activation_fn=tf.nn.relu, name='fire5_bn',
                         is_training=is_training, reuse=reuse)
        net = fire_module(net, 48, 192, name='fire6', **conv_args_fm)
        net = bottleneck_simple(net, 48, 192, name='fire7', **conv_args_fm)
        net = batch_norm(net, activation_fn=tf.nn.relu, name='fire7_bn',
                         is_training=is_training, reuse=reuse)
        net = fire_module(net, 64, 256, name='fire8', **conv_args_fm)
        net = max_pool(net, name='maxpool8', **pool_args)
        net = dropout(net, drop_p=1 - dropout_keep_prob, name='dropout6',
                      **common_args)
        net = conv2d(net, num_classes, filter_size=(1, 1), name='conv10',
                     **conv_args_fm)
        logits = global_avg_pool(net, name='logits', **pool_args)
        predictions = softmax(logits, name='predictions', **common_args)
    return end_points(is_training)

def model(x, is_training, reuse, num_classes=2, **kwargs):
    common_args = common_layer_args(is_training, reuse)
    fc_args = make_args(activation=relu, **common_args)
    logit_args = make_args(activation=None, **common_args)
    x = embedding(x, 10000, 128, reuse)
    x = bidirectional_rnn(x, LSTMCell(128, reuse), LSTMCell(128, reuse),
                          **common_args)
    logits = fc(x, num_classes, name='logits', **logit_args)
    predictions = softmax(logits, name='predictions', **common_args)
    return end_points(is_training)

def model(is_training, reuse):
    common_args = common_layer_args(is_training, reuse)
    fc_args = make_args(activation=relu, **common_args)
    logit_args = make_args(activation=None, **common_args)
    x = input((None, height * width), **common_args)
    x = fully_connected(x, n_output=100, name='fc1', **fc_args)
    logits = fully_connected(x, n_output=10, name="logits", **logit_args)
    predictions = softmax(logits, name='predictions', **common_args)
    return end_points(is_training)

def discriminator(inputs, is_training, reuse, num_classes=1):
    common_args = common_layer_args(is_training, reuse)
    conv_args = make_args(batch_norm=True, activation=lrelu,
                          w_init=initz.he_normal(scale=1), untie_biases=False,
                          **common_args)
    conv_args_1st = make_args(batch_norm=None, activation=lrelu,
                              w_init=initz.he_normal(scale=1),
                              untie_biases=False, **common_args)
    logits_args = make_args(activation=None, w_init=initz.he_normal(scale=1),
                            **common_args)
    pool_args = make_args(padding='SAME', **common_args)
    end_points = {}
    x = inputs
    end_points['inputs'] = x
    x = dropout(x, drop_p=0.2, name="input_dropout1", **common_args)
    x = conv2d(x, 96, filter_size=(5, 5), stride=(2, 2), name="d_conv1_1",
               **conv_args_1st)
    end_points['d_conv1_1'] = x
    x = conv2d(x, 96, name="d_conv1_2", **conv_args)
    end_points['d_conv1_2'] = x
    x = conv2d(x, 96, stride=(2, 2), name="d_conv1_3", **conv_args)
    end_points['d_conv1_3'] = x
    x = dropout(x, drop_p=0.2, name="dropout1", **common_args)
    x = conv2d(x, 192, name="d_conv2_1", **conv_args)
    end_points['d_conv2_1'] = x
    x = conv2d(x, 192, name="d_conv2_2", **conv_args)
    end_points['d_conv2_2'] = x
    # x = conv2d(x, 192, stride=(2, 2), name="d_conv2_3", **conv_args)
    # end_points['d_conv2_3'] = x
    x = dropout(x, drop_p=0.2, name="dropout2", **common_args)
    # x = conv2d(x, 192, stride=(2, 2), name="d_conv3_1", **conv_args)
    # end_points['d_conv3_1'] = x
    x = conv2d(x, 192, filter_size=(1, 1), name="d_conv4_1", **conv_args)
    end_points['d_conv4_1'] = x
    x = conv2d(x, 192, filter_size=(1, 1), name="d_conv4_2", **conv_args)
    end_points['d_conv4_2'] = x
    x = global_avg_pool(x, name="global_pool")
    end_points['global_pool'] = x
    logits = fully_connected(x, num_classes, name="d_logits", **logits_args)
    end_points['logits'] = logits
    end_points['predictions'] = softmax(logits, name='predictions',
                                        **common_args)
    return end_points

def model(x, is_training, reuse, num_classes=10, **kwargs):
    common_args = common_layer_args(is_training, reuse)
    fc_args = make_args(activation=relu, **common_args)
    logit_args = make_args(activation=None, **common_args)
    x = fully_connected(x, n_output=100, name='fc1', **fc_args)
    logits = fully_connected(x, n_output=num_classes, name="logits",
                             **logit_args)
    predictions = softmax(logits, name='predictions', **common_args)
    return end_points(is_training)

def model(is_training, reuse):
    common_args = common_layer_args(is_training, reuse)
    x = input((None, 7, 7, 512), **common_args)
    # x = batch_norm_tf(x, **common_args)
    x = fully_connected(x, 512, activation=relu, name='fc1', **common_args)
    x = dropout(x, drop_p=0.5, name='dropout1', **common_args)
    logits = fully_connected(x, 6, activation=None, name='logits',
                             **common_args)
    predictions = softmax(logits, name='predictions', **common_args)
    return end_points(is_training)

def model(x, is_training, reuse, num_classes=2, **kwargs):
    common_args = common_layer_args(is_training, reuse)
    fc_args = make_args(activation=relu, **common_args)
    logit_args = make_args(activation=None, **common_args)
    x = embedding(x, 10000, 128, reuse)
    x1 = conv1d(x, 128, name='conv1_1', **common_args)
    x2 = conv1d(x, 128, filter_size=4, name='conv1_2', **common_args)
    x3 = conv1d(x, 128, filter_size=5, name='conv1_3', **common_args)
    x = merge([x1, x2, x3], 'concat', axis=1)
    x = lstm(x, 384, reuse, is_training)
    x = dropout(x, drop_p=0.3, **common_args)
    logits = fc(x, num_classes, name='logits', **logit_args)
    predictions = softmax(logits, name='predictions', **common_args)
    return end_points(is_training)

def model(x, is_training, reuse):
    common_args = common_layer_args(is_training, reuse)
    fc_args = make_args(activation=relu, **common_args)
    logit_args = make_args(activation=None, **common_args)
    x = embedding(x, 10000, 128, reuse)
    x1 = conv1d(x, 128, name='conv1_1', **common_args)
    x2 = conv1d(x, 128, filter_size=4, name='conv1_2', **common_args)
    x3 = conv1d(x, 128, filter_size=5, name='conv1_3', **common_args)
    x = merge([x1, x2, x3], 'concat', axis=1)
    x = tf.expand_dims(x, 2)
    x = global_max_pool(x)
    x = dropout(x, drop_p=0.3, **common_args)
    logits = fc(x, 2, name='logits', **logit_args)
    predictions = softmax(logits, name='predictions', **common_args)
    return end_points(is_training)

def model(inputs, is_training, reuse, input_size=image_size[0],
          drop_p_conv=0.0, drop_p_trans=0.0, n_filters=64,
          n_layers=[1, 2, 2, 3], num_classes=5, **kwargs):
    common_args = common_layer_args(is_training, reuse)
    conv_args = make_args(
        batch_norm=True, activation=prelu, w_init=initz.he_normal(scale=1),
        untie_biases=True, **common_args)
    fc_args = make_args(activation=prelu, w_init=initz.he_normal(scale=1),
                        **common_args)
    logit_args = make_args(activation=None, w_init=initz.he_normal(scale=1),
                           **common_args)
    pred_args = make_args(activation=prelu, w_init=initz.he_normal(scale=1),
                          **common_args)
    pool_args = make_args(padding='SAME', filter_size=(2, 2), stride=(2, 2),
                          **common_args)
    x = conv2d(inputs, 48, filter_size=(7, 7), name="conv1", **conv_args)
    x = max_pool(x, name='pool1', **pool_args)
    x = conv2d(x, 64, name="conv2_1", **conv_args)
    x = conv2d(x, 64, name="conv2_2", **conv_args)
    x = max_pool(x, name='pool2', **pool_args)
    # 112
    for block_idx in range(3):
        x, n_filters = dense_block(
            x, n_filters, num_layers=n_layers[block_idx], drop_p=drop_p_conv,
            block_name='dense_' + str(block_idx), **conv_args)
        x = trans_block(
            x, n_filters, drop_p=drop_p_trans,
            block_name='trans_' + str(block_idx), **conv_args)
    x, n_filters = dense_block(
        x, n_filters, num_layers=n_layers[3], drop_p=drop_p_trans,
        block_name='dense_3', **conv_args)
    # 8
    x = global_avg_pool(x, name='avgpool_1a_8x8')
    logits = fully_connected(x, n_output=num_classes, name="logits",
                             **logit_args)
    predictions = softmax(logits, name='predictions', **common_args)
    return end_points(is_training)

def model(is_training, reuse):
    common_args = common_layer_args(is_training, reuse)
    conv_args = make_args(batch_norm=None, activation=prelu, **common_args)
    fc_args = make_args(activation=prelu, **common_args)
    logit_args = make_args(activation=None, **common_args)
    x = input((None, crop_size[1], crop_size[0], 1), **common_args)
    x = conv2d(x, 32, name='conv1_1', **conv_args)
    x = conv2d(x, 32, name='conv1_2', **conv_args)
    x = max_pool(x, name='pool1', **common_args)
    x = dropout(x, drop_p=0.25, name='dropout1', **common_args)
    x = fully_connected(x, n_output=128, name='fc1', **fc_args)
    x = dropout(x, drop_p=0.5, name='dropout2', **common_args)
    logits = fully_connected(x, n_output=36, name="logits", **logit_args)
    predictions = softmax(logits, name='predictions', **common_args)
    return end_points(is_training)

def resnet_v1(inputs, blocks, num_classes=None, global_pool=True,
              output_stride=None, include_root_block=True, scope=None,
              **common_args):
    conv_args = make_args(use_bias=False, activation=relu,
                          batch_norm=batch_norm_tf,
                          batch_norm_args=batch_norm_params, **common_args)
    with tf.variable_scope(scope, 'resnet_v1', [inputs],
                           reuse=common_args['reuse']) as sc:
        net = inputs
        if include_root_block:
            if output_stride is not None:
                if output_stride % 4 != 0:
                    raise ValueError(
                        'The output_stride needs to be a multiple of 4.')
                output_stride /= 4
            net = conv2d_same(net, 64, 7, stride=2, scope='conv1', **conv_args)
            net = max_pool(net, filter_size=(3, 3), stride=(2, 2),
                           padding='SAME', name='pool1')
        net = stack_blocks_dense(net, blocks, output_stride, **common_args)
        if global_pool:
            # Global average pooling.
            net = tf.reduce_mean(net, [1, 2], name='pool5', keep_dims=True)
        if num_classes is not None:
            net = conv2d(net, num_classes, filter_size=(1, 1), activation=None,
                         name='logits', **common_args)
            predictions = softmax(net, name='predictions', **common_args)
        return end_points(common_args['is_training'])

def model(is_training, reuse):
    common_args = common_layer_args(is_training, reuse)
    conv_args = make_args(activation=relu, **common_args)
    pool_args = make_args(filter_size=(2, 2), **common_args)
    fc_args = make_args(activation=relu, **common_args)
    logit_args = make_args(activation=None, **common_args)
    x = input((None, crop_size[1], crop_size[0], 3), **common_args)
    x = conv2d(x, 64, name='conv1_1', **conv_args)
    x = conv2d(x, 64, name='conv1_2', **conv_args)
    x = max_pool(x, name='maxpool1', **pool_args)
    x = conv2d(x, 128, name='conv2_1', **conv_args)
    x = conv2d(x, 128, name='conv2_2', **conv_args)
    x = max_pool(x, name='maxpool2', **pool_args)
    x = conv2d(x, 256, name='conv3_1', **conv_args)
    x = conv2d(x, 256, name='conv3_2', **conv_args)
    x = conv2d(x, 256, name='conv3_3', **conv_args)
    x = max_pool(x, name='maxpool3', **pool_args)
    x = conv2d(x, 512, name='conv4_1', **conv_args)
    x = conv2d(x, 512, name='conv4_2', **conv_args)
    x = conv2d(x, 512, name='conv4_3', **conv_args)
    x = max_pool(x, name='maxpool4', **pool_args)
    x = conv2d(x, 512, name='conv5_1', **conv_args)
    x = conv2d(x, 512, name='conv5_2', **conv_args)
    x = conv2d(x, 512, name='conv5_3', **conv_args)
    x = max_pool(x, name='maxpool5', **pool_args)
    x = fully_connected(x, n_output=4096, name='fc6', **fc_args)
    x = dropout(x, drop_p=0.5, name='dropout1', **common_args)
    x = fully_connected(x, n_output=4096, name='fc7', **fc_args)
    x = dropout(x, drop_p=0.5, name='dropout2', **common_args)
    logits = fully_connected(x, n_output=1000, name="logits", **logit_args)
    predictions = softmax(logits, name='predictions', **common_args)
    return end_points(is_training)

def model(x, is_training, reuse, num_classes=10, **config):
    common_args = common_layer_args(is_training, reuse)
    logit_args = make_args(activation=None, **common_args)
    if config['max_conv_layers'] > 0:
        for i in range(1, config['n_conv_layers'] + 1):
            activation, size, maxpool = layer_config(config, i,
                                                     layer_type='conv')
            conv_args = make_args(batch_norm=bool(config['batch_norm']),
                                  activation=prelu, **common_args)
            x = conv2d(x, size, name='conv{}'.format(i), **conv_args)
            if maxpool:
                x = max_pool(x, name='pool{}'.format(i), **common_args)
    if config['max_fc_layers'] > 0:
        for i in range(1, config['n_fc_layers'] + 1):
            activation, size, _dropout = layer_config(config, i,
                                                      layer_type='fc')
            fc_args = make_args(activation=prelu, **common_args)
            x = fully_connected(x, n_output=size, name='fc{}'.format(i),
                                **fc_args)
            x = dropout(x, drop_p=np.round(_dropout, 2),
                        name='dropout{}'.format(i), **common_args)
    logits = fully_connected(x, n_output=num_classes, name="logits",
                             **logit_args)
    predictions = softmax(logits, name='predictions', **common_args)
    return end_points(is_training)

def model(inputs, is_training, reuse, num_classes=5, drop_prob=0.2,
          name='InceptionResnetV2'):
    common_args = common_layer_args(is_training, reuse)
    rest_conv_params = make_args(use_bias=False, batch_norm=batch_norm,
                                 activation=relu, **common_args)
    conv_params_no_bias = make_args(use_bias=False, batch_norm=batch_norm,
                                    activation=relu, **common_args)
    conv_params = make_args(use_bias=True, batch_norm=batch_norm,
                            activation=None, **common_args)
    rest_logit_params = make_args(activation=None, **common_args)
    rest_pool_params = make_args(padding='SAME', **common_args)
    rest_dropout_params = make_args(drop_p=drop_prob, **common_args)
    # inputs = input((None, crop_size[1], crop_size[0], 3), **common_args)
    with tf.variable_scope(name, 'InceptionResnetV2'):
        net = conv2d(inputs, 32, stride=(2, 2), name='Conv2d_1a_3x3',
                     **conv_params_no_bias)
        net = conv2d(net, 32, name='Conv2d_2a_3x3', **conv_params_no_bias)
        # 112 x 112
        net = conv2d(net, 64, name='Conv2d_2b_3x3', **rest_conv_params)
        # 112 x 112
        net = max_pool(net, name='MaxPool_3a_3x3', **rest_pool_params)
        # 64 x 64
        net = conv2d(net, 80, filter_size=(1, 1), name='Conv2d_3b_1x1',
                     **rest_conv_params)
        # 64 x 64
        net = conv2d(net, 192, name='Conv2d_4a_3x3', **rest_conv_params)
        # 64 x 64
        net = max_pool(net, stride=(2, 2), name='maxpool_5a_3x3',
                       **rest_pool_params)
        # 32 x 32
        with tf.variable_scope('Mixed_5b'):
            with tf.variable_scope('Branch_0'):
                tower_conv = conv2d(net, 96, filter_size=(1, 1),
                                    name='Conv2d_1x1', **rest_conv_params)
            with tf.variable_scope('Branch_1'):
                tower_conv1_0 = conv2d(net, 48, filter_size=(1, 1),
                                       name='Conv2d_0a_1x1',
                                       **rest_conv_params)
                tower_conv1_1 = conv2d(tower_conv1_0, 64, filter_size=(5, 5),
                                       name='Conv2d_0b_5x5',
                                       **rest_conv_params)
            with tf.variable_scope('Branch_2'):
                tower_conv2_0 = conv2d(net, 64, filter_size=(1, 1),
                                       name='Conv2d_0a_1x1',
                                       **rest_conv_params)
                tower_conv2_1 = conv2d(tower_conv2_0, 96, name='Conv2d_0b_3x3',
                                       **rest_conv_params)
                tower_conv2_2 = conv2d(tower_conv2_1, 96, name='Conv2d_0c_3x3',
                                       **rest_conv_params)
            with tf.variable_scope('Branch_3'):
                tower_pool = avg_pool_2d(net, stride=(1, 1),
                                         name='avgpool_0a_3x3',
                                         **rest_pool_params)
                tower_pool_1 = conv2d(tower_pool, 64, filter_size=(1, 1),
                                      name='Conv2d_0b_1x1',
                                      **rest_conv_params)
            net = tf.concat(
                [tower_conv, tower_conv1_1, tower_conv2_2, tower_pool_1], 3)
        with tf.variable_scope('Repeat'):
            for i in range(1, 11):
                net = block35(net, name='block35_' + str(i), scale=0.17,
                              **conv_params_no_bias)
        # 32 x 32
        with tf.variable_scope('Mixed_6a'):
            with tf.variable_scope('Branch_0'):
                tower_conv = conv2d(net, 384, stride=(2, 2),
                                    name='Conv2d_1a_3x3', **rest_conv_params)
            with tf.variable_scope('Branch_1'):
                tower_conv1_0 = conv2d(net, 256, filter_size=(1, 1),
                                       name='Conv2d_0a_1x1',
                                       **rest_conv_params)
                tower_conv1_1 = conv2d(tower_conv1_0, 256,
                                       name='Conv2d_0b_3x3',
                                       **rest_conv_params)
                tower_conv1_2 = conv2d(tower_conv1_1, 384, stride=(2, 2),
                                       name='Conv2d_1a_3x3',
                                       **rest_conv_params)
            with tf.variable_scope('Branch_2'):
                tower_pool = max_pool(net, name='maxpool_1a_3x3',
                                      **rest_pool_params)
            net = tf.concat([tower_conv, tower_conv1_2, tower_pool], 3)
        with tf.variable_scope('Repeat_1'):
            for i in range(1, 21):
                net = block17(net, name='block17_' + str(i), scale=0.10,
                              **conv_params_no_bias)
        with tf.variable_scope('Mixed_7a'):
            with tf.variable_scope('Branch_0'):
                tower_conv = conv2d(net, 256, filter_size=(1, 1),
                                    name='Conv2d_0a_1x1', **rest_conv_params)
                tower_conv_1 = conv2d(tower_conv, 384, stride=(2, 2),
                                      name='Conv2d_1a_3x3',
                                      **rest_conv_params)
            with tf.variable_scope('Branch_1'):
                tower_conv1 = conv2d(net, 256, filter_size=(1, 1),
                                     name='Conv2d_0a_1x1', **rest_conv_params)
                tower_conv1_1 = conv2d(tower_conv1, 288, stride=(2, 2),
                                       name='Conv2d_1a_3x3',
                                       **rest_conv_params)
            with tf.variable_scope('Branch_2'):
                tower_conv2 = conv2d(net, 256, filter_size=(1, 1),
                                     name='Conv2d_0a_1x1', **rest_conv_params)
                tower_conv2_1 = conv2d(tower_conv2, 288, name='Conv2d_0b_3x3',
                                       **rest_conv_params)
                tower_conv2_2 = conv2d(tower_conv2_1, 320, stride=(2, 2),
                                       name='Conv2d_1a_3x3',
                                       **rest_conv_params)
            with tf.variable_scope('Branch_3'):
                tower_pool = max_pool(net, name='maxpool_1a_3x3',
                                      **rest_pool_params)
            net = tf.concat(
                [tower_conv_1, tower_conv1_1, tower_conv2_2, tower_pool], 3)
        # 8 x 8
        with tf.variable_scope('Repeat_2'):
            for i in range(1, 10):
                net = block8(net, name='block8_' + str(i), scale=0.20,
                             **conv_params_no_bias)
        net = block8(net, name='Block8', **conv_params_no_bias)
        net = conv2d(net, 1536, filter_size=(1, 1), name='Conv2d_7b_1x1',
                     **rest_conv_params)
        with tf.variable_scope('Logits'):
            net = global_avg_pool(net, name='avgpool_1a_8x8')
            net = dropout(net, name='dropout', **rest_dropout_params)
            logits = fully_connected(net, num_classes, name='Logits',
                                     **rest_logit_params)
            predictions = softmax(logits, name='Predictions', **common_args)
    return end_points(is_training)

def model(inputs, is_training, reuse, num_classes=21, batch_size=1):
    common_args = common_layer_args(is_training, reuse)
    conv_args = make_args(batch_norm=True, activation=lrelu,
                          w_init=initz.he_normal(scale=1), untie_biases=False,
                          **common_args)
    upsample_args = make_args(
        batch_norm=False, activation=lrelu, use_bias=False, **common_args)
    logits_args = make_args(
        activation=None, **common_args)
    pool_args = make_args(padding='SAME', **common_args)
    conv1_1 = conv2d(inputs, 64, name="vgg_19/conv1/conv1_1", **conv_args)
    conv1_2 = conv2d(conv1_1, 64, name="vgg_19/conv1/conv1_2", **conv_args)
    pool1 = max_pool(conv1_2, stride=2, name='pool1', **pool_args)
    conv2_1 = conv2d(pool1, 128, name="vgg_19/conv2/conv2_1", **conv_args)
    conv2_2 = conv2d(conv2_1, 128, name="vgg_19/conv2/conv2_2", **conv_args)
    pool2 = max_pool(conv2_2, stride=2, name='pool2', **pool_args)
    conv3_1 = conv2d(pool2, 256, name="vgg_19/conv3/conv3_1", **conv_args)
    conv3_2 = conv2d(conv3_1, 256, name="vgg_19/conv3/conv3_2", **conv_args)
    conv3_3 = conv2d(conv3_2, 256, name="vgg_19/conv3/conv3_3", **conv_args)
    conv3_4 = conv2d(conv3_3, 256, name="vgg_19/conv3/conv3_4", **conv_args)
    pool3 = max_pool(conv3_4, stride=2, name='pool3', **pool_args)
    conv4_1 = conv2d(pool3, 512, name="vgg_19/conv4/conv4_1", **conv_args)
    conv4_2 = conv2d(conv4_1, 512, name="vgg_19/conv4/conv4_2", **conv_args)
    conv4_3 = conv2d(conv4_2, 512, name="vgg_19/conv4/conv4_3", **conv_args)
    conv4_4 = conv2d(conv4_3, 512, name="vgg_19/conv4/conv4_4", **conv_args)
    pool4 = max_pool(conv4_4, stride=2, name='pool4', **pool_args)
    conv5_1 = conv2d(pool4, 512, name="vgg_19/conv5/conv5_1", **conv_args)
    conv5_2 = conv2d(conv5_1, 512, name="vgg_19/conv5/conv5_2", **conv_args)
    conv5_3 = conv2d(conv5_2, 512, name="vgg_19/conv5/conv5_3", **conv_args)
    conv5_4 = conv2d(conv5_3, 512, name="vgg_19/conv5/conv5_4", **conv_args)
    pool5 = max_pool(conv5_4, stride=2, name='pool5', **pool_args)
    fc6 = conv2d(pool5, 4096, filter_size=(7, 7), name="vgg_19/fc6",
                 **conv_args)
    fc6 = dropout(fc6, **common_args)
    fc7 = conv2d(fc6, 4096, filter_size=(1, 1), name="vgg_19/fc7", **conv_args)
    fc7 = dropout(fc7, **common_args)
    score_fr = conv2d(fc7, num_classes, filter_size=(1, 1), name="score_fr",
                      **conv_args)
    pred = tf.argmax(score_fr, axis=3)
    pool4_shape = pool4.get_shape().as_list()
    upscore2 = upsample2d(score_fr,
                          [batch_size, pool4_shape[1], pool4_shape[2],
                           num_classes],
                          filter_size=(4, 4), stride=(2, 2), name="deconv2d_1",
                          w_init=initz.bilinear(
                              (4, 4, num_classes, num_classes)),
                          **upsample_args)
    score_pool4 = conv2d(pool4, num_classes, filter_size=(1, 1),
                         name="score_pool4", **conv_args)
    fuse_pool4 = tf.add(upscore2, score_pool4)
    pool3_shape = pool3.get_shape().as_list()
    upscore4 = upsample2d(fuse_pool4,
                          [batch_size, pool3_shape[1], pool3_shape[2],
                           num_classes],
                          filter_size=(4, 4), stride=(2, 2), name="deconv2d_2",
                          w_init=initz.bilinear(
                              (4, 4, num_classes, num_classes)),
                          **upsample_args)
    score_pool3 = conv2d(pool3, num_classes, filter_size=(1, 1),
                         name="score_pool3", **conv_args)
    fuse_pool3 = tf.add(upscore4, score_pool3)
    input_shape = inputs.get_shape().as_list()
    upscore32 = upsample2d(fuse_pool3,
                           [batch_size, input_shape[1], input_shape[2],
                            num_classes],
                           filter_size=(16, 16), stride=(8, 8),
                           name="deconv2d_3",
                           w_init=initz.bilinear(
                               (16, 16, num_classes, num_classes)),
                           **logits_args)
    logits = register_to_collections(
        tf.reshape(upscore32, shape=(-1, num_classes)), name='logits',
        **common_args)
    pred_up = tf.argmax(upscore32, axis=3)
    pred_up = register_to_collections(pred_up, name='final_prediction_map',
                                      **common_args)
    predictions = softmax(logits, name='predictions', **common_args)
    return end_points(is_training)

def discriminator(inputs, is_training, reuse, num_classes=11, batch_size=32):
    common_args = common_layer_args(is_training, reuse)
    conv_args = make_args(batch_norm=True, activation=lrelu,
                          w_init=initz.he_normal(scale=1), untie_biases=False,
                          **common_args)
    conv_args_1st = make_args(batch_norm=None, activation=lrelu,
                              w_init=initz.he_normal(scale=1),
                              untie_biases=False, **common_args)
    logits_args = make_args(activation=None, w_init=initz.he_normal(scale=1),
                            **common_args)
    pool_args = make_args(padding='SAME', **common_args)
    end_points = {}
    x = inputs
    end_points['inputs'] = x
    x = dropout(x, drop_p=0.2, name="input_dropout1", **common_args)
    x = conv2d(x, 96, filter_size=(5, 5), stride=(2, 2), name="d_conv1_1",
               **conv_args_1st)
    end_points['d_conv1_1'] = x
    x = conv2d(x, 96, name="d_conv1_2", **conv_args)
    end_points['d_conv1_2'] = x
    x = conv2d(x, 96, stride=(2, 2), name="d_conv1_3", **conv_args)
    end_points['d_conv1_3'] = x
    x = dropout(x, drop_p=0.2, name="dropout1", **common_args)
    x = conv2d(x, 192, name="d_conv2_1", **conv_args)
    end_points['d_conv2_1'] = x
    x = conv2d(x, 192, name="d_conv2_2", **conv_args)
    end_points['d_conv2_2'] = x
    x = conv2d(x, 192, stride=(2, 2), name="d_conv2_3", **conv_args)
    end_points['d_conv2_3'] = x
    x = dropout(x, drop_p=0.2, name="dropout2", **common_args)
    x = conv2d(x, 192, stride=(2, 2), name="d_conv3_1", **conv_args)
    end_points['d_conv3_1'] = x
    x = conv2d(x, 192, filter_size=(1, 1), name="d_conv4_1", **conv_args)
    end_points['d_conv4_1'] = x
    x = conv2d(x, 192, filter_size=(1, 1), name="d_conv4_2", **conv_args)
    end_points['d_conv4_2'] = x
    x = global_avg_pool(x, name="global_pool")
    end_points['global_pool'] = x
    logits = fully_connected(x, num_classes, name="d_logits", **logits_args)
    end_points['logits'] = logits
    end_points['predictions'] = softmax(logits, name='predictions',
                                        **common_args)
    if is_training:
        batch_size = 2 * batch_size
        generated_class_logits = tf.squeeze(
            tf.slice(logits, [0, num_classes - 1], [batch_size, 1]))
        end_points['generated_class_logits'] = generated_class_logits
        positive_class_logits = tf.slice(logits, [0, 0],
                                         [batch_size, num_classes - 1])
        end_points['positive_class_logits'] = positive_class_logits
        max_ = tf.reduce_max(positive_class_logits, 1, keep_dims=True)
        safe_pos_class_logits = positive_class_logits - max_
        end_points['safe_pos_class_logits'] = safe_pos_class_logits
        gan_logits = tf.log(
            tf.reduce_sum(tf.exp(safe_pos_class_logits), 1)) + tf.squeeze(
                max_) - generated_class_logits
        end_points['gan_logits'] = gan_logits
        assert len(gan_logits.get_shape()) == 1
        probs = tf.nn.sigmoid(gan_logits)
        end_points['probs'] = probs
        # Integer division keeps the slice sizes ints under Python 3.
        class_logits = tf.slice(logits, [0, 0],
                                [batch_size // 2, num_classes])
        end_points['class_logits'] = class_logits
        D_on_data = tf.slice(probs, [0], [batch_size // 2])
        end_points['D_on_data'] = D_on_data
        D_on_data_logits = tf.slice(gan_logits, [0], [batch_size // 2])
        end_points['D_on_data_logits'] = D_on_data_logits
        D_on_G = tf.slice(probs, [batch_size // 2], [batch_size // 2])
        end_points['D_on_G'] = D_on_G
        D_on_G_logits = tf.slice(gan_logits, [batch_size // 2],
                                 [batch_size // 2])
        end_points['D_on_G_logits'] = D_on_G_logits
        return end_points
    else:
        return end_points

def resnet_v1(inputs, is_training, reuse, blocks, num_classes=None,
              global_pool=True, output_stride=None, include_root_block=True,
              name=None):
    """Generator for v2 (preactivation) ResNet models.

    This function generates a family of ResNet v2 models. See the
    resnet_v2_*() methods for specific model instantiations, obtained by
    selecting different block instantiations that produce ResNets of various
    depths.

    Training for image classification on Imagenet is usually done with
    [224, 224] inputs, resulting in [7, 7] feature maps at the output of the
    last ResNet block for the ResNets defined in [1] that have nominal stride
    equal to 32. However, for dense prediction tasks we advise that one uses
    inputs with spatial dimensions that are multiples of 32 plus 1, e.g.,
    [321, 321]. In this case the feature maps at the ResNet output will have
    spatial shape [(height - 1) / output_stride + 1,
    (width - 1) / output_stride + 1] and corners exactly aligned with the
    input image corners, which greatly facilitates alignment of the features
    to the image. Using as input [225, 225] images results in [8, 8] feature
    maps at the output of the last ResNet block.

    For dense prediction tasks, the ResNet needs to run in fully-convolutional
    (FCN) mode and global_pool needs to be set to False. The ResNets in
    [1, 2] all have nominal stride equal to 32 and a good choice in FCN mode
    is to use output_stride=16 in order to increase the density of the
    computed features at small computational and memory overhead, cf.
    http://arxiv.org/abs/1606.00915.

    Args:
        inputs: A tensor of size [batch, height_in, width_in, channels].
        blocks: A list of length equal to the number of ResNet blocks. Each
            element is a resnet_utils.Block object describing the units in
            the block.
        num_classes: Number of predicted classes for classification tasks.
            If None we return the features before the logit layer.
        is_training: whether or not the model is being trained.
        global_pool: If True, we perform global average pooling before
            computing the logits. Set to True for image classification,
            False for dense prediction.
        output_stride: If None, then the output will be computed at the
            nominal network stride. If output_stride is not None, it
            specifies the requested ratio of input to output spatial
            resolution.
        include_root_block: If True, include the initial convolution followed
            by max-pooling, if False excludes it. If excluded, `inputs`
            should be the results of an activation-less convolution.
        reuse: whether or not the network and its variables should be reused.
            To be able to reuse 'scope' must be given.
        name: Optional variable_scope.

    Returns:
        net: A rank-4 tensor of size [batch, height_out, width_out,
            channels_out]. If global_pool is False, then height_out and
            width_out are reduced by a factor of output_stride compared to
            the respective height_in and width_in, else both height_out and
            width_out equal one. If num_classes is None, then net is the
            output of the last ResNet block, potentially after global average
            pooling. If num_classes is not None, net contains the pre-softmax
            activations.
        end_points: A dictionary from components of the network to the
            corresponding activation.

    Raises:
        ValueError: If the target output_stride is not valid.
    """
    common_args = common_layer_args(is_training, reuse)
    conv_args = make_args(batch_norm=True, activation=prelu,
                          w_init=initz.he_normal(scale=1), untie_biases=False,
                          **common_args)
    logits_args = make_args(activation=None, w_init=initz.he_normal(scale=1),
                            **common_args)
    pred_args = make_args(activation=prelu, w_init=initz.he_normal(scale=1),
                          **common_args)
    pool_args = make_args(padding='SAME', **common_args)
    with tf.variable_scope(name, 'resnet_v2', [inputs], reuse=reuse):
        net = inputs
        if include_root_block:
            if output_stride is not None:
                if output_stride % 4 != 0:
                    raise ValueError(
                        'The output_stride needs to be a multiple of 4.')
                output_stride /= 4
            # We do not include batch normalization or activation functions in
            # conv1 because the first ResNet unit will perform these. Cf.
            # Appendix of [2].
            net = resnet_utils.conv2d_same(net, 64, 7, stride=2, name='conv1',
                                           **common_args)
            net = max_pool(net, name='pool1', **pool_args)
        net = resnet_utils.stack_blocks_dense(net, blocks, output_stride,
                                              **conv_args)
        # This is needed because the pre-activation variant does not have
        # batch normalization or activation functions in the residual unit
        # output. See Appendix of [2].
        net = batch_norm(net, activation=tf.nn.relu, name='postnorm',
                         is_training=is_training, reuse=reuse)
        if global_pool:
            # Global average pooling.
            net = tf.reduce_mean(net, [1, 2], name='pool5', keep_dims=True)
        if num_classes is not None:
            net = conv2d(net, num_classes, filter_size=(1, 1), name='logits',
                         **logits_args)
        if num_classes is not None:
            predictions = softmax(net, name='predictions', **pred_args)
        return end_points(is_training)

def model(is_training, reuse, flexi_inputs=False):
    common_args = common_layer_args(is_training, reuse)
    conv_args = make_args(activation=relu, **common_args)
    pool_args = make_args(filter_size=(2, 2), **common_args)
    logit_args = make_args(activation=None, **common_args)
    if flexi_inputs:
        inputs_shape = (None, None, None, 3)
    else:
        inputs_shape = (None, crop_size[1], crop_size[0], 3)
    net_inputs = input(inputs_shape, **common_args)
    x = net_inputs
    with tf.variable_scope('vgg_16', reuse=reuse):
        mean_rgb = tf.get_variable(name='mean_rgb',
                                   initializer=tf.truncated_normal(shape=[3]),
                                   trainable=False)
        x = x - mean_rgb
        with tf.variable_scope('conv1'):
            x = conv2d(x, 64, name='conv1_1', **conv_args)
            x = conv2d(x, 64, name='conv1_2', **conv_args)
            x = max_pool(x, name='maxpool1', **pool_args)
        with tf.variable_scope('conv2'):
            x = conv2d(x, 128, name='conv2_1', **conv_args)
            x = conv2d(x, 128, name='conv2_2', **conv_args)
            x = max_pool(x, name='maxpool2', **pool_args)
        with tf.variable_scope('conv3'):
            x = conv2d(x, 256, name='conv3_1', **conv_args)
            x = conv2d(x, 256, name='conv3_2', **conv_args)
            x = conv2d(x, 256, name='conv3_3', **conv_args)
            x = max_pool(x, name='maxpool3', **pool_args)
        with tf.variable_scope('conv4'):
            x = conv2d(x, 512, name='conv4_1', **conv_args)
            x = conv2d(x, 512, name='conv4_2', **conv_args)
            x = conv2d(x, 512, name='conv4_3', **conv_args)
            x = max_pool(x, name='maxpool4', **pool_args)
        with tf.variable_scope('conv5'):
            x = conv2d(x, 512, name='conv5_1', **conv_args)
            x = conv2d(x, 512, name='conv5_2', **conv_args)
            x = conv2d(x, 512, name='conv5_3', **conv_args)
            x = max_pool(x, name='maxpool5', **pool_args)
        x = conv2d(x, 4096, name='fc6', filter_size=(7, 7), padding='VALID',
                   **conv_args)
        x = dropout(x, drop_p=0.5, name='dropout6', **common_args)
        x = conv2d(x, 4096, name='fc7', filter_size=(1, 1), **conv_args)
        x = dropout(x, drop_p=0.5, name='dropout7', **common_args)
        x = conv2d(x, 1000, name='fc8', filter_size=(1, 1), **logit_args)
    if flexi_inputs:
        logits = alias(x, name='logits', **common_args)
    else:
        logits = squeeze(x, axis=[1, 2], name='logits', **common_args)
    predictions = softmax(logits, name='predictions', **common_args)
    return end_points(is_training)

def vgg_16(is_training, reuse, num_classes=1000, dropout_keep_prob=0.5,
           spatial_squeeze=True, name='vgg_16'):
    """Oxford Net VGG 16-Layers version D Example.

    Note: All the fully_connected layers have been transformed to conv2d
    layers. To use in classification mode, resize input to 224x224.

    Args:
        inputs: a tensor of size [batch_size, height, width, channels].
        num_classes: number of predicted classes.
        is_training: whether or not the model is being trained.
        dropout_keep_prob: the probability that activations are kept in the
            dropout layers during training.
        spatial_squeeze: whether or not to squeeze the spatial dimensions of
            the outputs. Useful to remove unnecessary dimensions for
            classification.
        name: Optional name for the variables.

    Returns:
        the last op containing the log predictions and end_points dict.
    """
    common_args = common_layer_args(is_training, reuse)
    conv_args = make_args(batch_norm=True, activation=prelu,
                          w_init=initz.he_normal(scale=1), untie_biases=False,
                          **common_args)
    logit_args = make_args(
        activation=None, w_init=initz.he_normal(scale=1), **common_args)
    pred_args = make_args(
        activation=prelu, w_init=initz.he_normal(scale=1), **common_args)
    pool_args = make_args(padding='SAME', **common_args)
    inputs = input((None, crop_size[1], crop_size[0], 3), **common_args)
    with tf.variable_scope(name, 'vgg_16', [inputs]):
        net = repeat(inputs, 2, conv2d, 64, filter_size=(3, 3), name='conv1',
                     **conv_args)
        net = max_pool(net, name='pool1', **pool_args)
        net = repeat(net, 2, conv2d, 128, filter_size=(3, 3), name='conv2',
                     **conv_args)
        net = max_pool(net, name='pool2', **pool_args)
        net = repeat(net, 3, conv2d, 256, filter_size=(3, 3), name='conv3',
                     **conv_args)
        net = max_pool(net, name='pool3', **pool_args)
        net = repeat(net, 3, conv2d, 512, filter_size=(3, 3), name='conv4',
                     **conv_args)
        net = max_pool(net, name='pool4', **pool_args)
        net = repeat(net, 3, conv2d, 512, filter_size=(3, 3), name='conv5',
                     **conv_args)
        net = max_pool(net, name='pool5', **pool_args)
        # Use conv2d instead of fully_connected layers.
        net = conv2d(net, 4096, filter_size=(7, 7), name='fc6', **conv_args)
        net = dropout(net, drop_p=1 - dropout_keep_prob, name='dropout6',
                      **common_args)
        net = conv2d(net, 4096, filter_size=(1, 1), name='fc7', **conv_args)
        net = dropout(net, drop_p=1 - dropout_keep_prob, name='dropout7',
                      **common_args)
        logits = conv2d(net, num_classes, filter_size=(1, 1), name='logits',
                        **logit_args)
        if spatial_squeeze:
            logits = tf.squeeze(logits, [1, 2], name='logits/squeezed')
        predictions = softmax(logits, name='predictions', **pred_args)
        return end_points(is_training)

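# Why fc6 in vgg_16 above uses a (7, 7) filter: with 224x224 inputs (see the
# docstring), the five stride-2 poolings leave a 7x7 grid, so a single 7x7
# convolution spans the whole feature map and stands in for the original
# fully connected layer. A small illustrative check, not part of the model:
side = 224
for _ in range(5):  # pool1 .. pool5
    side //= 2
assert side == 7
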
def model(inputs, is_training, reuse, num_classes=5, dropout_keep_prob=0.5,
          spatial_squeeze=True, name='alexnet_v2', **kwargs):
    """AlexNet version 2.

    Described in: http://arxiv.org/pdf/1404.5997v2.pdf
    Parameters from:
    github.com/akrizhevsky/cuda-convnet2/blob/master/layers/
    layers-imagenet-1gpu.cfg

    Note: All the fully_connected layers have been transformed to conv2d
    layers. To use in classification mode, resize input to 224x224. To use in
    fully convolutional mode, set spatial_squeeze to false. The LRN layers
    have been removed and the initializers changed from
    random_normal_initializer to xavier_initializer.

    Args:
        inputs: a tensor of size [batch_size, height, width, channels].
        num_classes: number of predicted classes.
        is_training: whether or not the model is being trained.
        dropout_keep_prob: the probability that activations are kept in the
            dropout layers during training.
        spatial_squeeze: whether or not to squeeze the spatial dimensions of
            the outputs. Useful to remove unnecessary dimensions for
            classification.
        name: Optional name for the variables.

    Returns:
        the last op containing the log predictions and end_points dict.
    """
    common_args = common_layer_args(is_training, reuse)
    conv_args = make_args(batch_norm=True, activation=prelu,
                          w_init=initz.he_normal(scale=1), untie_biases=False,
                          **common_args)
    logit_args = make_args(activation=None, w_init=initz.he_normal(scale=1),
                           **common_args)
    pred_args = make_args(activation=prelu, w_init=initz.he_normal(scale=1),
                          **common_args)
    pool_args = make_args(padding='SAME', **common_args)
    # inputs = input((None, crop_size[1], crop_size[0], 3), **common_args)
    with tf.variable_scope(name, 'alexnet_v2', [inputs]):
        net = conv2d(inputs, 64, filter_size=(11, 11), stride=(4, 4),
                     name='conv1', **conv_args)
        net = max_pool(net, stride=(2, 2), name='pool1', **pool_args)
        net = conv2d(net, 192, filter_size=(5, 5), name='conv2', **conv_args)
        net = max_pool(net, stride=(2, 2), name='pool2', **pool_args)
        net = conv2d(net, 384, name='conv3', **conv_args)
        net = conv2d(net, 384, name='conv4', **conv_args)
        net = conv2d(net, 256, name='conv5', **conv_args)
        net = max_pool(net, stride=(2, 2), name='pool5', **pool_args)
        # Use conv2d instead of fully_connected layers.
        net = conv2d(net, 4096, filter_size=(5, 5), name='fc6', **conv_args)
        net = dropout(net, drop_p=1 - dropout_keep_prob, name='dropout6',
                      **common_args)
        net = conv2d(net, 4096, filter_size=(1, 1), name='fc7', **conv_args)
        net = dropout(net, drop_p=1 - dropout_keep_prob, name='dropout7',
                      **common_args)
        net = global_avg_pool(net)
        logits = fc(net, num_classes, name='logits', **logit_args)
        predictions = softmax(logits, name='predictions', **common_args)
    return end_points(is_training)

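# The spatial_squeeze flag documented above refers to collapsing a
# [batch, 1, 1, num_classes] logit map to [batch, num_classes] via
# tf.squeeze(logits, [1, 2]), as done in vgg_16. This variant instead ends
# with global_avg_pool followed by fc, so the flag is accepted but never
# consulted. A hypothetical shape-only sketch of that squeeze:
def _squeeze_shape(shape, axes=(1, 2)):
    # Drop the singleton spatial axes from a logit-map shape.
    return tuple(d for i, d in enumerate(shape) if i not in axes)

assert _squeeze_shape((8, 1, 1, 5)) == (8, 5)
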
def model(is_training, reuse, inputs=None):
    common_trainable_args = common_layer_args(is_training, reuse,
                                              trainable=True)
    common_frozen_args = common_layer_args(is_training, reuse,
                                           trainable=False)
    conv_args = make_conv_args(activation=relu, **common_frozen_args)
    logit_args = make_args(activation=None, **common_trainable_args)
    common_args = common_frozen_args
    # move this down to train only a few layers
    common_args = common_trainable_args
    if inputs is None:
        net = input((None, crop_size[1], crop_size[0], 3), **common_args)
    else:
        net = inputs
    with tf.variable_scope('resnet_v1_50', reuse=reuse):
        mean_rgb = tf.get_variable(name='mean_rgb',
                                   initializer=tf.truncated_normal(shape=[3]),
                                   trainable=False)
        net = net - mean_rgb
        net = conv2d_same(net, 64, filter_size=(7, 7), stride=(2, 2),
                          name='conv1', **conv_args)
        net = max_pool(net, filter_size=(3, 3), stride=(2, 2), padding='SAME',
                       name='pool1')
        with tf.variable_scope('block1') as sc:
            with tf.variable_scope('unit_1'):
                net = bottleneck(net, 256, 64, 1, **common_args)
            with tf.variable_scope('unit_2'):
                net = bottleneck(net, 256, 64, 1, **common_args)
            with tf.variable_scope('unit_3'):
                net = bottleneck(net, 256, 64, 2, **common_args)
            net = collect_named_outputs(common_args['outputs_collections'],
                                        sc.name, net)
        with tf.variable_scope('block2') as sc:
            with tf.variable_scope('unit_1'):
                net = bottleneck(net, 512, 128, 1, **common_args)
            with tf.variable_scope('unit_2'):
                net = bottleneck(net, 512, 128, 1, **common_args)
            with tf.variable_scope('unit_3'):
                net = bottleneck(net, 512, 128, 1, **common_args)
            with tf.variable_scope('unit_4'):
                net = bottleneck(net, 512, 128, 2, **common_args)
            net = collect_named_outputs(common_args['outputs_collections'],
                                        sc.name, net)
        with tf.variable_scope('block3') as sc:
            with tf.variable_scope('unit_1'):
                net = bottleneck(net, 1024, 256, 1, **common_args)
            with tf.variable_scope('unit_2'):
                net = bottleneck(net, 1024, 256, 1, **common_args)
            with tf.variable_scope('unit_3'):
                net = bottleneck(net, 1024, 256, 1, **common_args)
            with tf.variable_scope('unit_4'):
                net = bottleneck(net, 1024, 256, 1, **common_args)
            with tf.variable_scope('unit_5'):
                net = bottleneck(net, 1024, 256, 1, **common_args)
            with tf.variable_scope('unit_6'):
                net = bottleneck(net, 1024, 256, 2, **common_args)
            net = collect_named_outputs(common_args['outputs_collections'],
                                        sc.name, net)
        with tf.variable_scope('block4') as sc:
            with tf.variable_scope('unit_1'):
                net = bottleneck(net, 2048, 512, 1, **common_args)
            with tf.variable_scope('unit_2'):
                net = bottleneck(net, 2048, 512, 1, **common_args)
            with tf.variable_scope('unit_3'):
                net = bottleneck(net, 2048, 512, 1, **common_args)
            net = collect_named_outputs(common_args['outputs_collections'],
                                        sc.name, net)
        net = tf.reduce_mean(net, [1, 2], name='pool5', keep_dims=True)
        net = conv2d(net, 1000, filter_size=(1, 1), name='logits',
                     **logit_args)
        logits = squeeze(net, axis=[1, 2], name='logits', **common_args)
        predictions = softmax(logits, name='predictions', **common_args)
    return end_points(common_args['is_training'])