# Imports assumed by this tf.contrib-era (TensorFlow 1.x) code; the exact
# aliases are an assumption, chosen so that both `layers` and `layers_lib`
# expose the slim layer ops (conv2d, max_pool2d, batch_norm, ...) used below.
import tensorflow as tf  # used only by the usage sketches in this file

from tensorflow.contrib import layers
from tensorflow.contrib.framework.python.ops import arg_scope
from tensorflow.contrib.layers.python.layers import layers as layers_lib
from tensorflow.contrib.layers.python.layers import utils
from tensorflow.contrib.slim.python.slim.nets import resnet_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope

trunc_normal = lambda stddev: init_ops.truncated_normal_initializer(0.0, stddev)

# `bottleneck` (the preactivation ResNet v2 residual unit) is referenced by
# resnet_v2() below but not defined in this excerpt; it is assumed to be
# defined elsewhere in this module, as in the contrib resnet_v2 source.


def subsample(inputs, factor, scope=None):
  """Subsamples the input along the spatial dimensions.

  Args:
    inputs: A `Tensor` of size [batch, height_in, width_in, channels].
    factor: The subsampling factor.
    scope: Optional variable_scope.

  Returns:
    output: A `Tensor` of size [batch, height_out, width_out, channels] with
      the input, either intact (if factor == 1) or subsampled (if factor > 1).
  """
  if factor == 1:
    return inputs
  else:
    return layers.max_pool2d(inputs, [1, 1], stride=factor, scope=scope)
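# Usage sketch (not part of the original source; assumes the `import
# tensorflow as tf` above and TF 1.x graph mode): with factor=2, the 1x1 max
# pool with stride 2 halves each spatial dimension.
def _subsample_example():
  x = tf.placeholder(tf.float32, [None, 8, 8, 16])
  return subsample(x, factor=2)  # -> shape [None, 4, 4, 16]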
def resnet_v2(inputs,
              blocks,
              num_classes=None,
              is_training=True,
              global_pool=True,
              output_stride=None,
              include_root_block=True,
              reuse=None,
              scope=None):
  """Generator for v2 (preactivation) ResNet models.

  This function generates a family of ResNet v2 models. See the resnet_v2_*()
  methods for specific model instantiations, obtained by selecting different
  block instantiations that produce ResNets of various depths.

  Training for image classification on Imagenet is usually done with [224, 224]
  inputs, resulting in [7, 7] feature maps at the output of the last ResNet
  block for the ResNets defined in [1] that have nominal stride equal to 32.
  However, for dense prediction tasks we advise that one uses inputs with
  spatial dimensions that are multiples of 32 plus 1, e.g., [321, 321]. In
  this case the feature maps at the ResNet output will have spatial shape
  [(height - 1) / output_stride + 1, (width - 1) / output_stride + 1] and
  corners exactly aligned with the input image corners, which greatly
  facilitates alignment of the features to the image. Using as input [225, 225]
  images results in [8, 8] feature maps at the output of the last ResNet block.

  For dense prediction tasks, the ResNet needs to run in fully-convolutional
  (FCN) mode and global_pool needs to be set to False. The ResNets in [1, 2]
  all have nominal stride equal to 32 and a good choice in FCN mode is to use
  output_stride=16 in order to increase the density of the computed features
  at a small computational and memory overhead,
  cf. http://arxiv.org/abs/1606.00915.

  Args:
    inputs: A tensor of size [batch, height_in, width_in, channels].
    blocks: A list of length equal to the number of ResNet blocks. Each element
      is a resnet_utils.Block object describing the units in the block.
    num_classes: Number of predicted classes for classification tasks. If None
      we return the features before the logit layer.
    is_training: whether batch_norm layers are in training mode.
    global_pool: If True, we perform global average pooling before computing
      the logits. Set to True for image classification, False for dense
      prediction.
    output_stride: If None, then the output will be computed at the nominal
      network stride. If output_stride is not None, it specifies the requested
      ratio of input to output spatial resolution.
    include_root_block: If True, include the initial convolution followed by
      max-pooling; if False, exclude it. If excluded, `inputs` should be the
      results of an activation-less convolution.
    reuse: whether or not the network and its variables should be reused. To be
      able to reuse, 'scope' must be given.
    scope: Optional variable_scope.

  Returns:
    net: A rank-4 tensor of size [batch, height_out, width_out, channels_out].
      If global_pool is False, then height_out and width_out are reduced by a
      factor of output_stride compared to the respective height_in and
      width_in, else both height_out and width_out equal one. If num_classes is
      None, then net is the output of the last ResNet block, potentially after
      global average pooling. If num_classes is not None, net contains the
      pre-softmax activations.
    end_points: A dictionary from components of the network to the
      corresponding activation.

  Raises:
    ValueError: If the target output_stride is not valid.
  """
  with variable_scope.variable_scope(
      scope, 'resnet_v2', [inputs], reuse=reuse) as sc:
    end_points_collection = sc.original_name_scope + '_end_points'
    with arg_scope(
        [layers_lib.conv2d, bottleneck, resnet_utils.stack_blocks_dense],
        outputs_collections=end_points_collection):
      with arg_scope([layers.batch_norm], is_training=is_training):
        net = inputs
        if include_root_block:
          if output_stride is not None:
            if output_stride % 4 != 0:
              raise ValueError('The output_stride needs to be a multiple of 4.')
            # The root block already has stride 4; integer division keeps
            # output_stride an int under Python 3.
            output_stride //= 4
          # We do not include batch normalization or activation functions in
          # conv1 because the first ResNet unit will perform these. Cf.
          # Appendix of [2].
          with arg_scope(
              [layers_lib.conv2d], activation_fn=None, normalizer_fn=None):
            net = resnet_utils.conv2d_same(net, 64, 7, stride=2, scope='conv1')
          net = layers.max_pool2d(net, [3, 3], stride=2, scope='pool1')
        net = resnet_utils.stack_blocks_dense(net, blocks, output_stride)
        # This is needed because the pre-activation variant does not have batch
        # normalization or activation functions in the residual unit output.
        # See Appendix of [2].
        net = layers.batch_norm(net, activation_fn=nn_ops.relu, scope='postnorm')
        if global_pool:
          # Global average pooling.
          net = math_ops.reduce_mean(net, [1, 2], name='pool5', keepdims=True)
        if num_classes is not None:
          net = layers_lib.conv2d(
              net,
              num_classes, [1, 1],
              activation_fn=None,
              normalizer_fn=None,
              scope='logits')
        # Convert end_points_collection into a dictionary of end_points.
        end_points = utils.convert_collection_to_dict(end_points_collection)
        if num_classes is not None:
          end_points['predictions'] = layers.softmax(net, scope='predictions')
        return net, end_points
def vgg_a(inputs,
          num_classes=1000,
          is_training=True,
          dropout_keep_prob=0.5,
          spatial_squeeze=True,
          scope='vgg_a'):
  """Oxford Net VGG 11-Layers version A Example.

  Note: All the fully_connected layers have been transformed to conv2d layers.
  To use in classification mode, resize input to 224x224.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    num_classes: number of predicted classes.
    is_training: whether or not the model is being trained.
    dropout_keep_prob: the probability that activations are kept in the dropout
      layers during training.
    spatial_squeeze: whether or not to squeeze the spatial dimensions of the
      outputs. Useful to remove unnecessary dimensions for classification.
    scope: Optional scope for the variables.

  Returns:
    the last op containing the logits and an end_points dict.
  """
  with variable_scope.variable_scope(scope, 'vgg_a', [inputs]) as sc:
    end_points_collection = sc.original_name_scope + '_end_points'
    # Collect outputs for conv2d, fully_connected and max_pool2d.
    with arg_scope(
        [layers.conv2d, layers_lib.max_pool2d],
        outputs_collections=end_points_collection):
      net = layers_lib.repeat(
          inputs, 1, layers.conv2d, 64, [3, 3], scope='conv1')
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool1')
      net = layers_lib.repeat(net, 1, layers.conv2d, 128, [3, 3], scope='conv2')
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool2')
      net = layers_lib.repeat(net, 2, layers.conv2d, 256, [3, 3], scope='conv3')
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool3')
      net = layers_lib.repeat(net, 2, layers.conv2d, 512, [3, 3], scope='conv4')
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool4')
      net = layers_lib.repeat(net, 2, layers.conv2d, 512, [3, 3], scope='conv5')
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool5')
      # Use conv2d instead of fully_connected layers.
      net = layers.conv2d(net, 4096, [7, 7], padding='VALID', scope='fc6')
      net = layers_lib.dropout(
          net, dropout_keep_prob, is_training=is_training, scope='dropout6')
      net = layers.conv2d(net, 4096, [1, 1], scope='fc7')
      net = layers_lib.dropout(
          net, dropout_keep_prob, is_training=is_training, scope='dropout7')
      net = layers.conv2d(
          net,
          num_classes, [1, 1],
          activation_fn=None,
          normalizer_fn=None,
          scope='fc8')
      # Convert end_points_collection into an end_point dict.
      end_points = utils.convert_collection_to_dict(end_points_collection)
      if spatial_squeeze:
        net = array_ops.squeeze(net, [1, 2], name='fc8/squeezed')
        end_points[sc.name + '/fc8'] = net
      return net, end_points
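# Usage sketch (assumption): VGG-A logits in inference mode. The input must be
# 224x224 so the [7, 7] 'fc6' kernel exactly covers the 7x7 'pool5' map.
def _vgg_a_example():
  images = tf.placeholder(tf.float32, [1, 224, 224, 3])
  logits, end_points = vgg_a(images, num_classes=1000, is_training=False)
  return logits  # shape [1, 1000] after the spatial squeeze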
def inception_v1_base(inputs, final_endpoint='Mixed_5c', scope='InceptionV1'):
  """Defines the Inception V1 base architecture.

  This architecture is defined in:
    Going deeper with convolutions
    Christian Szegedy, Wei Liu, Yangqing Jia, Pierre Sermanet, Scott Reed,
    Dragomir Anguelov, Dumitru Erhan, Vincent Vanhoucke, Andrew Rabinovich.
    http://arxiv.org/pdf/1409.4842v1.pdf.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    final_endpoint: specifies the endpoint to construct the network up to. It
      can be one of ['Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1',
      'Conv2d_2c_3x3', 'MaxPool_3a_3x3', 'Mixed_3b', 'Mixed_3c',
      'MaxPool_4a_3x3', 'Mixed_4b', 'Mixed_4c', 'Mixed_4d', 'Mixed_4e',
      'Mixed_4f', 'MaxPool_5a_2x2', 'Mixed_5b', 'Mixed_5c'].
    scope: Optional variable_scope.

  Returns:
    A tuple (net, end_points), where net is the activation at final_endpoint
    and end_points is a dictionary from components of the network to the
    corresponding activation.

  Raises:
    ValueError: if final_endpoint is not set to one of the predefined values.
  """
  end_points = {}
  with variable_scope.variable_scope(scope, 'InceptionV1', [inputs]):
    with arg_scope(
        [layers.conv2d, layers_lib.fully_connected],
        weights_initializer=trunc_normal(0.01)):
      with arg_scope(
          [layers.conv2d, layers_lib.max_pool2d], stride=1, padding='SAME'):
        end_point = 'Conv2d_1a_7x7'
        net = layers.conv2d(inputs, 64, [7, 7], stride=2, scope=end_point)
        end_points[end_point] = net
        if final_endpoint == end_point:
          return net, end_points
        end_point = 'MaxPool_2a_3x3'
        net = layers_lib.max_pool2d(net, [3, 3], stride=2, scope=end_point)
        end_points[end_point] = net
        if final_endpoint == end_point:
          return net, end_points
        end_point = 'Conv2d_2b_1x1'
        net = layers.conv2d(net, 64, [1, 1], scope=end_point)
        end_points[end_point] = net
        if final_endpoint == end_point:
          return net, end_points
        end_point = 'Conv2d_2c_3x3'
        net = layers.conv2d(net, 192, [3, 3], scope=end_point)
        end_points[end_point] = net
        if final_endpoint == end_point:
          return net, end_points
        end_point = 'MaxPool_3a_3x3'
        net = layers_lib.max_pool2d(net, [3, 3], stride=2, scope=end_point)
        end_points[end_point] = net
        if final_endpoint == end_point:
          return net, end_points

        end_point = 'Mixed_3b'
        with variable_scope.variable_scope(end_point):
          with variable_scope.variable_scope('Branch_0'):
            branch_0 = layers.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1')
          with variable_scope.variable_scope('Branch_1'):
            branch_1 = layers.conv2d(net, 96, [1, 1], scope='Conv2d_0a_1x1')
            branch_1 = layers.conv2d(
                branch_1, 128, [3, 3], scope='Conv2d_0b_3x3')
          with variable_scope.variable_scope('Branch_2'):
            branch_2 = layers.conv2d(net, 16, [1, 1], scope='Conv2d_0a_1x1')
            branch_2 = layers.conv2d(
                branch_2, 32, [3, 3], scope='Conv2d_0b_3x3')
          with variable_scope.variable_scope('Branch_3'):
            branch_3 = layers_lib.max_pool2d(
                net, [3, 3], scope='MaxPool_0a_3x3')
            branch_3 = layers.conv2d(
                branch_3, 32, [1, 1], scope='Conv2d_0b_1x1')
          net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
        end_points[end_point] = net
        if final_endpoint == end_point:
          return net, end_points

        end_point = 'Mixed_3c'
        with variable_scope.variable_scope(end_point):
          with variable_scope.variable_scope('Branch_0'):
            branch_0 = layers.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1x1')
          with variable_scope.variable_scope('Branch_1'):
            branch_1 = layers.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1x1')
            branch_1 = layers.conv2d(
                branch_1, 192, [3, 3], scope='Conv2d_0b_3x3')
          with variable_scope.variable_scope('Branch_2'):
            branch_2 = layers.conv2d(net, 32, [1, 1], scope='Conv2d_0a_1x1')
            branch_2 = layers.conv2d(
                branch_2, 96, [3, 3], scope='Conv2d_0b_3x3')
          with variable_scope.variable_scope('Branch_3'):
            branch_3 = layers_lib.max_pool2d(
                net, [3, 3], scope='MaxPool_0a_3x3')
            branch_3 = layers.conv2d(
                branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
          net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
        end_points[end_point] = net
        if final_endpoint == end_point:
          return net, end_points

        end_point = 'MaxPool_4a_3x3'
        net = layers_lib.max_pool2d(net, [3, 3], stride=2, scope=end_point)
        end_points[end_point] = net
        if final_endpoint == end_point:
          return net, end_points

        end_point = 'Mixed_4b'
        with variable_scope.variable_scope(end_point):
          with variable_scope.variable_scope('Branch_0'):
            branch_0 = layers.conv2d(net, 192, [1, 1], scope='Conv2d_0a_1x1')
          with variable_scope.variable_scope('Branch_1'):
            branch_1 = layers.conv2d(net, 96, [1, 1], scope='Conv2d_0a_1x1')
            branch_1 = layers.conv2d(
                branch_1, 208, [3, 3], scope='Conv2d_0b_3x3')
          with variable_scope.variable_scope('Branch_2'):
            branch_2 = layers.conv2d(net, 16, [1, 1], scope='Conv2d_0a_1x1')
            branch_2 = layers.conv2d(
                branch_2, 48, [3, 3], scope='Conv2d_0b_3x3')
          with variable_scope.variable_scope('Branch_3'):
            branch_3 = layers_lib.max_pool2d(
                net, [3, 3], scope='MaxPool_0a_3x3')
            branch_3 = layers.conv2d(
                branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
          net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
        end_points[end_point] = net
        if final_endpoint == end_point:
          return net, end_points

        end_point = 'Mixed_4c'
        with variable_scope.variable_scope(end_point):
          with variable_scope.variable_scope('Branch_0'):
            branch_0 = layers.conv2d(net, 160, [1, 1], scope='Conv2d_0a_1x1')
          with variable_scope.variable_scope('Branch_1'):
            branch_1 = layers.conv2d(net, 112, [1, 1], scope='Conv2d_0a_1x1')
            branch_1 = layers.conv2d(
                branch_1, 224, [3, 3], scope='Conv2d_0b_3x3')
          with variable_scope.variable_scope('Branch_2'):
            branch_2 = layers.conv2d(net, 24, [1, 1], scope='Conv2d_0a_1x1')
            branch_2 = layers.conv2d(
                branch_2, 64, [3, 3], scope='Conv2d_0b_3x3')
          with variable_scope.variable_scope('Branch_3'):
            branch_3 = layers_lib.max_pool2d(
                net, [3, 3], scope='MaxPool_0a_3x3')
            branch_3 = layers.conv2d(
                branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
          net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
        end_points[end_point] = net
        if final_endpoint == end_point:
          return net, end_points

        end_point = 'Mixed_4d'
        with variable_scope.variable_scope(end_point):
          with variable_scope.variable_scope('Branch_0'):
            branch_0 = layers.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1x1')
          with variable_scope.variable_scope('Branch_1'):
            branch_1 = layers.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1x1')
            branch_1 = layers.conv2d(
                branch_1, 256, [3, 3], scope='Conv2d_0b_3x3')
          with variable_scope.variable_scope('Branch_2'):
            branch_2 = layers.conv2d(net, 24, [1, 1], scope='Conv2d_0a_1x1')
            branch_2 = layers.conv2d(
                branch_2, 64, [3, 3], scope='Conv2d_0b_3x3')
          with variable_scope.variable_scope('Branch_3'):
            branch_3 = layers_lib.max_pool2d(
                net, [3, 3], scope='MaxPool_0a_3x3')
            branch_3 = layers.conv2d(
                branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
          net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
        end_points[end_point] = net
        if final_endpoint == end_point:
          return net, end_points

        end_point = 'Mixed_4e'
        with variable_scope.variable_scope(end_point):
          with variable_scope.variable_scope('Branch_0'):
            branch_0 = layers.conv2d(net, 112, [1, 1], scope='Conv2d_0a_1x1')
          with variable_scope.variable_scope('Branch_1'):
            branch_1 = layers.conv2d(net, 144, [1, 1], scope='Conv2d_0a_1x1')
            branch_1 = layers.conv2d(
                branch_1, 288, [3, 3], scope='Conv2d_0b_3x3')
          with variable_scope.variable_scope('Branch_2'):
            branch_2 = layers.conv2d(net, 32, [1, 1], scope='Conv2d_0a_1x1')
            branch_2 = layers.conv2d(
                branch_2, 64, [3, 3], scope='Conv2d_0b_3x3')
          with variable_scope.variable_scope('Branch_3'):
            branch_3 = layers_lib.max_pool2d(
                net, [3, 3], scope='MaxPool_0a_3x3')
            branch_3 = layers.conv2d(
                branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
          net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
        end_points[end_point] = net
        if final_endpoint == end_point:
          return net, end_points

        end_point = 'Mixed_4f'
        with variable_scope.variable_scope(end_point):
          with variable_scope.variable_scope('Branch_0'):
            branch_0 = layers.conv2d(net, 256, [1, 1], scope='Conv2d_0a_1x1')
          with variable_scope.variable_scope('Branch_1'):
            branch_1 = layers.conv2d(net, 160, [1, 1], scope='Conv2d_0a_1x1')
            branch_1 = layers.conv2d(
                branch_1, 320, [3, 3], scope='Conv2d_0b_3x3')
          with variable_scope.variable_scope('Branch_2'):
            branch_2 = layers.conv2d(net, 32, [1, 1], scope='Conv2d_0a_1x1')
            branch_2 = layers.conv2d(
                branch_2, 128, [3, 3], scope='Conv2d_0b_3x3')
          with variable_scope.variable_scope('Branch_3'):
            branch_3 = layers_lib.max_pool2d(
                net, [3, 3], scope='MaxPool_0a_3x3')
            branch_3 = layers.conv2d(
                branch_3, 128, [1, 1], scope='Conv2d_0b_1x1')
          net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
        end_points[end_point] = net
        if final_endpoint == end_point:
          return net, end_points

        end_point = 'MaxPool_5a_2x2'
        net = layers_lib.max_pool2d(net, [2, 2], stride=2, scope=end_point)
        end_points[end_point] = net
        if final_endpoint == end_point:
          return net, end_points

        end_point = 'Mixed_5b'
        with variable_scope.variable_scope(end_point):
          with variable_scope.variable_scope('Branch_0'):
            branch_0 = layers.conv2d(net, 256, [1, 1], scope='Conv2d_0a_1x1')
          with variable_scope.variable_scope('Branch_1'):
            branch_1 = layers.conv2d(net, 160, [1, 1], scope='Conv2d_0a_1x1')
            branch_1 = layers.conv2d(
                branch_1, 320, [3, 3], scope='Conv2d_0b_3x3')
          with variable_scope.variable_scope('Branch_2'):
            branch_2 = layers.conv2d(net, 32, [1, 1], scope='Conv2d_0a_1x1')
            # The '0a' scope name here (rather than '0b') is kept as in the
            # original code, for compatibility with released checkpoints.
            branch_2 = layers.conv2d(
                branch_2, 128, [3, 3], scope='Conv2d_0a_3x3')
          with variable_scope.variable_scope('Branch_3'):
            branch_3 = layers_lib.max_pool2d(
                net, [3, 3], scope='MaxPool_0a_3x3')
            branch_3 = layers.conv2d(
                branch_3, 128, [1, 1], scope='Conv2d_0b_1x1')
          net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
        end_points[end_point] = net
        if final_endpoint == end_point:
          return net, end_points

        end_point = 'Mixed_5c'
        with variable_scope.variable_scope(end_point):
          with variable_scope.variable_scope('Branch_0'):
            branch_0 = layers.conv2d(net, 384, [1, 1], scope='Conv2d_0a_1x1')
          with variable_scope.variable_scope('Branch_1'):
            branch_1 = layers.conv2d(net, 192, [1, 1], scope='Conv2d_0a_1x1')
            branch_1 = layers.conv2d(
                branch_1, 384, [3, 3], scope='Conv2d_0b_3x3')
          with variable_scope.variable_scope('Branch_2'):
            branch_2 = layers.conv2d(net, 48, [1, 1], scope='Conv2d_0a_1x1')
            branch_2 = layers.conv2d(
                branch_2, 128, [3, 3], scope='Conv2d_0b_3x3')
          with variable_scope.variable_scope('Branch_3'):
            branch_3 = layers_lib.max_pool2d(
                net, [3, 3], scope='MaxPool_0a_3x3')
            branch_3 = layers.conv2d(
                branch_3, 128, [1, 1], scope='Conv2d_0b_1x1')
          net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
        end_points[end_point] = net
        if final_endpoint == end_point:
          return net, end_points
        raise ValueError('Unknown final endpoint %s' % final_endpoint)
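# Usage sketch (assumption): truncate the base at an intermediate endpoint to
# reuse it as a feature extractor; all earlier endpoints remain available in
# `end_points`.
def _inception_v1_base_example():
  images = tf.placeholder(tf.float32, [1, 224, 224, 3])
  net, end_points = inception_v1_base(images, final_endpoint='Mixed_4f')
  mixed_3c = end_points['Mixed_3c']  # an intermediate activation
  return net, mixed_3c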
def alexnet_v2(inputs,
               num_classes=1000,
               is_training=True,
               dropout_keep_prob=0.5,
               spatial_squeeze=True,
               scope='alexnet_v2'):
  """AlexNet version 2.

  Described in: http://arxiv.org/pdf/1404.5997v2.pdf
  Parameters from:
  github.com/akrizhevsky/cuda-convnet2/blob/master/layers/
  layers-imagenet-1gpu.cfg

  Note: All the fully_connected layers have been transformed to conv2d layers.
  To use in classification mode, resize input to 224x224. To use in fully
  convolutional mode, set spatial_squeeze to false. The LRN layers have been
  removed and the initializers changed from random_normal_initializer to
  xavier_initializer.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    num_classes: number of predicted classes.
    is_training: whether or not the model is being trained.
    dropout_keep_prob: the probability that activations are kept in the dropout
      layers during training.
    spatial_squeeze: whether or not to squeeze the spatial dimensions of the
      outputs. Useful to remove unnecessary dimensions for classification.
    scope: Optional scope for the variables.

  Returns:
    the last op containing the logits and an end_points dict.
  """
  with variable_scope.variable_scope(scope, 'alexnet_v2', [inputs]) as sc:
    end_points_collection = sc.original_name_scope + '_end_points'
    # Collect outputs for conv2d, fully_connected and max_pool2d.
    with arg_scope(
        [layers.conv2d, layers_lib.fully_connected, layers_lib.max_pool2d],
        outputs_collections=[end_points_collection]):
      net = layers.conv2d(
          inputs, 64, [11, 11], 4, padding='VALID', scope='conv1')
      net = layers_lib.max_pool2d(net, [3, 3], 2, scope='pool1')
      net = layers.conv2d(net, 192, [5, 5], scope='conv2')
      net = layers_lib.max_pool2d(net, [3, 3], 2, scope='pool2')
      net = layers.conv2d(net, 384, [3, 3], scope='conv3')
      net = layers.conv2d(net, 384, [3, 3], scope='conv4')
      net = layers.conv2d(net, 256, [3, 3], scope='conv5')
      net = layers_lib.max_pool2d(net, [3, 3], 2, scope='pool5')
      # Use conv2d instead of fully_connected layers.
      with arg_scope(
          [layers.conv2d],
          weights_initializer=trunc_normal(0.005),
          biases_initializer=init_ops.constant_initializer(0.1)):
        net = layers.conv2d(net, 4096, [5, 5], padding='VALID', scope='fc6')
        net = layers_lib.dropout(
            net, dropout_keep_prob, is_training=is_training, scope='dropout6')
        net = layers.conv2d(net, 4096, [1, 1], scope='fc7')
        net = layers_lib.dropout(
            net, dropout_keep_prob, is_training=is_training, scope='dropout7')
        net = layers.conv2d(
            net,
            num_classes, [1, 1],
            activation_fn=None,
            normalizer_fn=None,
            biases_initializer=init_ops.zeros_initializer(),
            scope='fc8')
      # Convert end_points_collection into an end_point dict.
      end_points = utils.convert_collection_to_dict(end_points_collection)
      if spatial_squeeze:
        net = array_ops.squeeze(net, [1, 2], name='fc8/squeezed')
        end_points[sc.name + '/fc8'] = net
      return net, end_points
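# Usage sketch (assumption): fully convolutional use on a larger input, with
# spatial_squeeze disabled so the output keeps a spatial map of logits (one
# logits vector per output location). The 300x400 size is illustrative.
def _alexnet_v2_fcn_example():
  images = tf.placeholder(tf.float32, [1, 300, 400, 3])
  logits, _ = alexnet_v2(
      images, num_classes=1000, is_training=False, spatial_squeeze=False)
  return logits  # rank-4 tensor, e.g. [1, 4, 7, 1000] for this input size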
def overfeat(inputs,
             num_classes=1000,
             is_training=True,
             dropout_keep_prob=0.5,
             spatial_squeeze=True,
             scope='overfeat'):
  """Contains the model definition for the OverFeat network.

  The definition for the network was obtained from:
    OverFeat: Integrated Recognition, Localization and Detection using
    Convolutional Networks
    Pierre Sermanet, David Eigen, Xiang Zhang, Michael Mathieu, Rob Fergus and
    Yann LeCun, 2014
    http://arxiv.org/abs/1312.6229

  Note: All the fully_connected layers have been transformed to conv2d layers.
  To use in classification mode, resize input to 231x231. To use in fully
  convolutional mode, set spatial_squeeze to false.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    num_classes: number of predicted classes.
    is_training: whether or not the model is being trained.
    dropout_keep_prob: the probability that activations are kept in the dropout
      layers during training.
    spatial_squeeze: whether or not to squeeze the spatial dimensions of the
      outputs. Useful to remove unnecessary dimensions for classification.
    scope: Optional scope for the variables.

  Returns:
    the last op containing the logits and an end_points dict.
  """
  with variable_scope.variable_scope(scope, 'overfeat', [inputs]) as sc:
    end_points_collection = sc.name + '_end_points'
    # Collect outputs for conv2d, fully_connected and max_pool2d.
    with arg_scope(
        [layers.conv2d, layers_lib.fully_connected, layers_lib.max_pool2d],
        outputs_collections=end_points_collection):
      net = layers.conv2d(
          inputs, 64, [11, 11], 4, padding='VALID', scope='conv1')
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool1')
      net = layers.conv2d(net, 256, [5, 5], padding='VALID', scope='conv2')
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool2')
      net = layers.conv2d(net, 512, [3, 3], scope='conv3')
      net = layers.conv2d(net, 1024, [3, 3], scope='conv4')
      net = layers.conv2d(net, 1024, [3, 3], scope='conv5')
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool5')
      with arg_scope(
          [layers.conv2d],
          weights_initializer=trunc_normal(0.005),
          biases_initializer=init_ops.constant_initializer(0.1)):
        # Use conv2d instead of fully_connected layers.
        net = layers.conv2d(net, 3072, [6, 6], padding='VALID', scope='fc6')
        net = layers_lib.dropout(
            net, dropout_keep_prob, is_training=is_training, scope='dropout6')
        net = layers.conv2d(net, 4096, [1, 1], scope='fc7')
        net = layers_lib.dropout(
            net, dropout_keep_prob, is_training=is_training, scope='dropout7')
        net = layers.conv2d(
            net,
            num_classes, [1, 1],
            activation_fn=None,
            normalizer_fn=None,
            biases_initializer=init_ops.zeros_initializer(),
            scope='fc8')
      # Convert end_points_collection into an end_point dict.
      end_points = utils.convert_collection_to_dict(end_points_collection)
      if spatial_squeeze:
        net = array_ops.squeeze(net, [1, 2], name='fc8/squeezed')
        end_points[sc.name + '/fc8'] = net
      return net, end_points
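# Usage sketch (assumption): classification mode with the 231x231 input size
# named in the docstring, so the [6, 6] VALID 'fc6' kernel exactly covers the
# 6x6 'pool5' map.
def _overfeat_example():
  images = tf.placeholder(tf.float32, [1, 231, 231, 3])
  logits, end_points = overfeat(images, num_classes=1000, is_training=False)
  return logits  # shape [1, 1000]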