# Imports assume the TF 1.x tf.contrib layout these functions were written
# against.
from tensorflow.contrib import layers
from tensorflow.contrib.framework.python.ops import arg_scope
from tensorflow.contrib.layers.python.layers import layers as layers_lib
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import variable_scope

trunc_normal = lambda stddev: init_ops.truncated_normal_initializer(stddev=stddev)


def inception_v1(inputs,
                 num_classes=1000,
                 is_training=True,
                 dropout_keep_prob=0.8,
                 prediction_fn=layers_lib.softmax,
                 spatial_squeeze=True,
                 reuse=None,
                 scope='InceptionV1'):
  """Defines the Inception V1 architecture.

  This architecture is defined in:

    Going deeper with convolutions
    Christian Szegedy, Wei Liu, Yangqing Jia, Pierre Sermanet, Scott Reed,
    Dragomir Anguelov, Dumitru Erhan, Vincent Vanhoucke, Andrew Rabinovich.
    http://arxiv.org/pdf/1409.4842v1.pdf.

  The default image size used to train this network is 224x224.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    num_classes: number of predicted classes.
    is_training: whether the model is being trained or not.
    dropout_keep_prob: the fraction of activation values that are retained.
    prediction_fn: a function to get predictions out of logits.
    spatial_squeeze: if True, logits is of shape [B, C]; if False, logits is
      of shape [B, 1, 1, C], where B is batch_size and C is the number of
      classes.
    reuse: whether or not the network and its variables should be reused. To
      be able to reuse, 'scope' must be given.
    scope: Optional variable_scope.

  Returns:
    logits: the pre-softmax activations, a tensor of size
      [batch_size, num_classes].
    end_points: a dictionary from components of the network to the
      corresponding activation.
  """
  # Final pooling and prediction
  with variable_scope.variable_scope(
      scope, 'InceptionV1', [inputs, num_classes], reuse=reuse) as scope:
    with arg_scope(
        [layers_lib.batch_norm, layers_lib.dropout], is_training=is_training):
      net, end_points = inception_v1_base(inputs, scope=scope)
      with variable_scope.variable_scope('Logits'):
        net = layers_lib.avg_pool2d(net, [7, 7], stride=1, scope='MaxPool_0a_7x7')
        net = layers_lib.dropout(net, dropout_keep_prob, scope='Dropout_0b')
        logits = layers.conv2d(net, num_classes, [1, 1], activation_fn=None,
                               normalizer_fn=None, scope='Conv2d_0c_1x1')
        if spatial_squeeze:
          logits = array_ops.squeeze(logits, [1, 2], name='SpatialSqueeze')
        end_points['Logits'] = logits
        end_points['Predictions'] = prediction_fn(logits, scope='Predictions')
  return logits, end_points
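

# Illustrative usage sketch (not part of the original library code): build the
# V1 classifier for a batch of 224x224 RGB images and run inference in a
# TF 1.x session. The placeholder shape and the zero-valued feed are
# assumptions made for the example only; inception_v1_base is defined
# elsewhere in this module.
def _inception_v1_usage_example():
  import numpy as np
  import tensorflow as tf
  images = tf.placeholder(tf.float32, [1, 224, 224, 3])
  _, end_points = inception_v1(images, num_classes=1000, is_training=False)
  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # 'Predictions' holds the post-softmax class probabilities, shape [1, 1000].
    return sess.run(end_points['Predictions'],
                    feed_dict={images: np.zeros((1, 224, 224, 3), np.float32)})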
def inception_v2(inputs,
                 num_classes=1000,
                 is_training=True,
                 dropout_keep_prob=0.8,
                 min_depth=16,
                 depth_multiplier=1.0,
                 prediction_fn=layers_lib.softmax,
                 spatial_squeeze=True,
                 reuse=None,
                 scope='InceptionV2'):
  """Inception v2 model for classification.

  Constructs an Inception v2 network for classification as described in
  http://arxiv.org/abs/1502.03167.

  The recommended image size used to train this network is 224x224. For image
  sizes that differ substantially, it is recommended to use
  inception_v2_base() and connect custom final layers to the output.

  Args:
    inputs: a tensor of shape [batch_size, height, width, channels].
    num_classes: number of predicted classes.
    is_training: whether the model is being trained or not.
    dropout_keep_prob: the fraction of activation values that are retained.
    min_depth: Minimum depth value (number of channels) for all convolution
      ops. Enforced when depth_multiplier < 1, and not an active constraint
      when depth_multiplier >= 1.
    depth_multiplier: Float multiplier for the depth (number of channels) for
      all convolution ops. The value must be greater than zero. Typical usage
      will be to set this value in (0, 1) to reduce the number of parameters
      or computation cost of the model.
    prediction_fn: a function to get predictions out of logits.
    spatial_squeeze: if True, logits is of shape [B, C]; if False, logits is
      of shape [B, 1, 1, C], where B is batch_size and C is the number of
      classes. Note that input image sizes other than 224x224 might lead to
      different spatial dimensions, and hence cannot be squeezed. In this
      event, it is best to set spatial_squeeze as False, and perform a
      reduce_mean over the resulting spatial dimensions with sizes exceeding
      1.
    reuse: whether or not the network and its variables should be reused. To
      be able to reuse, 'scope' must be given.
    scope: Optional variable_scope.

  Returns:
    logits: the pre-softmax activations, a tensor of size
      [batch_size, num_classes].
    end_points: a dictionary from components of the network to the
      corresponding activation.

  Raises:
    ValueError: if depth_multiplier <= 0.
  """
  if depth_multiplier <= 0:
    raise ValueError('depth_multiplier is not greater than zero.')

  # Final pooling and prediction
  with variable_scope.variable_scope(
      scope, 'InceptionV2', [inputs, num_classes], reuse=reuse) as scope:
    with arg_scope(
        [layers_lib.batch_norm, layers_lib.dropout], is_training=is_training):
      net, end_points = inception_v2_base(
          inputs, scope=scope, min_depth=min_depth,
          depth_multiplier=depth_multiplier)
      with variable_scope.variable_scope('Logits'):
        kernel_size = _reduced_kernel_size_for_small_input(net, [7, 7])
        net = layers_lib.avg_pool2d(
            net, kernel_size, padding='VALID',
            scope='AvgPool_1a_{}x{}'.format(*kernel_size))
        # 1 x 1 x 1024
        net = layers_lib.dropout(
            net, keep_prob=dropout_keep_prob, scope='Dropout_1b')
        logits = layers.conv2d(
            net, num_classes, [1, 1], activation_fn=None, normalizer_fn=None,
            scope='Conv2d_1c_1x1')
        if spatial_squeeze:
          logits = array_ops.squeeze(logits, [1, 2], name='SpatialSqueeze')
      end_points['Logits'] = logits
      end_points['Predictions'] = prediction_fn(logits, scope='Predictions')
  return logits, end_points
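

# _reduced_kernel_size_for_small_input is called by inception_v2 and
# inception_v3 above but is not shown in this section. The sketch below is an
# assumption about the helper, reconstructed from how it is used here: it
# shrinks the pooling kernel when the incoming feature map is smaller than the
# default kernel (e.g. for inputs below the recommended training size).
def _reduced_kernel_size_for_small_input(input_tensor, kernel_size):
  """Defines the pooling kernel for inputs smaller than the default size."""
  shape = input_tensor.get_shape().as_list()
  if shape[1] is None or shape[2] is None:
    # Spatial dims unknown at graph-construction time: keep the default
    # kernel and rely on the caller to feed large enough inputs.
    kernel_size_out = kernel_size
  else:
    kernel_size_out = [min(shape[1], kernel_size[0]),
                       min(shape[2], kernel_size[1])]
  return kernel_size_out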
def inception_v3(inputs,
                 num_classes=1000,
                 is_training=True,
                 dropout_keep_prob=0.8,
                 min_depth=16,
                 depth_multiplier=1.0,
                 prediction_fn=layers_lib.softmax,
                 spatial_squeeze=True,
                 reuse=None,
                 create_aux_logits=True,
                 scope='InceptionV3'):
  """Inception model from http://arxiv.org/abs/1512.00567.

  "Rethinking the Inception Architecture for Computer Vision"
  Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jonathon Shlens,
  Zbigniew Wojna.

  With the default arguments this method constructs the exact model defined in
  the paper. However, one can experiment with variations of the inception_v3
  network by changing arguments dropout_keep_prob, min_depth and
  depth_multiplier.

  The default image size used to train this network is 299x299.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    num_classes: number of predicted classes.
    is_training: whether the model is being trained or not.
    dropout_keep_prob: the fraction of activation values that are retained.
    min_depth: Minimum depth value (number of channels) for all convolution
      ops. Enforced when depth_multiplier < 1, and not an active constraint
      when depth_multiplier >= 1.
    depth_multiplier: Float multiplier for the depth (number of channels) for
      all convolution ops. The value must be greater than zero. Typical usage
      will be to set this value in (0, 1) to reduce the number of parameters
      or computation cost of the model.
    prediction_fn: a function to get predictions out of logits.
    spatial_squeeze: if True, logits is of shape [B, C]; if False, logits is
      of shape [B, 1, 1, C], where B is batch_size and C is the number of
      classes. To use this parameter, the input images must be smaller than
      300x300 pixels, in which case the output logit layer does not contain
      spatial information and can be removed.
    reuse: whether or not the network and its variables should be reused. To
      be able to reuse, 'scope' must be given.
    create_aux_logits: whether to create auxiliary logits.
    scope: Optional variable_scope.

  Returns:
    logits: the pre-softmax activations, a tensor of size
      [batch_size, num_classes].
    end_points: a dictionary from components of the network to the
      corresponding activation.

  Raises:
    ValueError: if 'depth_multiplier' is less than or equal to zero.
  """
  if depth_multiplier <= 0:
    raise ValueError('depth_multiplier is not greater than zero.')

  def depth(d):
    return max(int(d * depth_multiplier), min_depth)

  with variable_scope.variable_scope(
      scope, 'InceptionV3', [inputs, num_classes], reuse=reuse) as scope:
    with arg_scope(
        [layers_lib.batch_norm, layers_lib.dropout], is_training=is_training):
      net, end_points = inception_v3_base(
          inputs, scope=scope, min_depth=min_depth,
          depth_multiplier=depth_multiplier)

      # Auxiliary Head logits
      if create_aux_logits and num_classes:
        with arg_scope(
            [layers.conv2d, layers_lib.max_pool2d, layers_lib.avg_pool2d],
            stride=1,
            padding='SAME'):
          aux_logits = end_points['Mixed_6e']
          with variable_scope.variable_scope('AuxLogits'):
            aux_logits = layers_lib.avg_pool2d(
                aux_logits, [5, 5], stride=3, padding='VALID',
                scope='AvgPool_1a_5x5')
            aux_logits = layers.conv2d(
                aux_logits, depth(128), [1, 1], scope='Conv2d_1b_1x1')
            # Shape of feature map before the final layer.
            kernel_size = _reduced_kernel_size_for_small_input(
                aux_logits, [5, 5])
            aux_logits = layers.conv2d(
                aux_logits, depth(768), kernel_size,
                weights_initializer=trunc_normal(0.01), padding='VALID',
                scope='Conv2d_2a_{}x{}'.format(*kernel_size))
            aux_logits = layers.conv2d(
                aux_logits, num_classes, [1, 1], activation_fn=None,
                normalizer_fn=None, weights_initializer=trunc_normal(0.001),
                scope='Conv2d_2b_1x1')
            if spatial_squeeze:
              aux_logits = array_ops.squeeze(
                  aux_logits, [1, 2], name='SpatialSqueeze')
            end_points['AuxLogits'] = aux_logits

      # Final pooling and prediction
      with variable_scope.variable_scope('Logits'):
        kernel_size = _reduced_kernel_size_for_small_input(net, [8, 8])
        net = layers_lib.avg_pool2d(
            net, kernel_size, padding='VALID',
            scope='AvgPool_1a_{}x{}'.format(*kernel_size))
        # 1 x 1 x 2048
        net = layers_lib.dropout(
            net, keep_prob=dropout_keep_prob, scope='Dropout_1b')
        end_points['PreLogits'] = net
        # 2048
        logits = layers.conv2d(
            net, num_classes, [1, 1], activation_fn=None, normalizer_fn=None,
            scope='Conv2d_1c_1x1')
        if spatial_squeeze:
          logits = array_ops.squeeze(logits, [1, 2], name='SpatialSqueeze')
        # 1000
      end_points['Logits'] = logits
      end_points['Predictions'] = prediction_fn(logits, scope='Predictions')
  return logits, end_points
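

# Illustrative sketch (assumption: TF 1.x graph mode): a thinned Inception v3.
# With depth_multiplier=0.5, every depth(d) above becomes max(int(0.5 * d), 16),
# roughly halving the channel count of each convolution; 'AuxLogits' appears in
# end_points only because create_aux_logits=True.
def _inception_v3_thinned_example():
  import tensorflow as tf
  images = tf.placeholder(tf.float32, [8, 299, 299, 3])
  logits, end_points = inception_v3(
      images, num_classes=1000, is_training=True,
      depth_multiplier=0.5, create_aux_logits=True)
  # Both heads are pre-softmax activations of shape [8, 1000].
  return logits, end_points['AuxLogits']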
def inception_v2_base(inputs,
                      final_endpoint='Mixed_5c',
                      min_depth=16,
                      depth_multiplier=1.0,
                      scope=None):
  """Inception v2 (6a2).

  Constructs an Inception v2 network from inputs to the given final endpoint.
  This method can construct the network up to the layer inception(5b) as
  described in http://arxiv.org/abs/1502.03167.

  Args:
    inputs: a tensor of shape [batch_size, height, width, channels].
    final_endpoint: specifies the endpoint to construct the network up to. It
      can be one of ['Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1',
      'Conv2d_2c_3x3', 'MaxPool_3a_3x3', 'Mixed_3b', 'Mixed_3c', 'Mixed_4a',
      'Mixed_4b', 'Mixed_4c', 'Mixed_4d', 'Mixed_4e', 'Mixed_5a', 'Mixed_5b',
      'Mixed_5c'].
    min_depth: Minimum depth value (number of channels) for all convolution
      ops. Enforced when depth_multiplier < 1, and not an active constraint
      when depth_multiplier >= 1.
    depth_multiplier: Float multiplier for the depth (number of channels) for
      all convolution ops. The value must be greater than zero. Typical usage
      will be to set this value in (0, 1) to reduce the number of parameters
      or computation cost of the model.
    scope: Optional variable_scope.

  Returns:
    tensor_out: output tensor corresponding to the final_endpoint.
    end_points: a set of activations for external use, for example summaries
      or losses.

  Raises:
    ValueError: if final_endpoint is not set to one of the predefined values,
      or depth_multiplier <= 0.
  """
  # end_points will collect relevant activations for external use, for example
  # summaries or losses.
  end_points = {}

  # Used to find thinned depths for each layer.
  if depth_multiplier <= 0:
    raise ValueError('depth_multiplier is not greater than zero.')

  def depth(d):
    return max(int(d * depth_multiplier), min_depth)

  with variable_scope.variable_scope(scope, 'InceptionV2', [inputs]):
    with arg_scope(
        [layers.conv2d, layers_lib.max_pool2d, layers_lib.avg_pool2d,
         layers.separable_conv2d],
        stride=1,
        padding='SAME'):
      # Note that sizes in the comments below assume an input spatial size of
      # 224x224; however, the inputs can be of any size greater than 32x32.

      # 224 x 224 x 3
      end_point = 'Conv2d_1a_7x7'
      # depthwise_multiplier here is different from depth_multiplier.
      # depthwise_multiplier determines the output channels of the initial
      # depthwise conv (see docs for tf.nn.separable_conv2d), while
      # depth_multiplier controls the number of channels of the subsequent
      # 1x1 convolution. Must have
      #   in_channels * depthwise_multiplier <= out_channels
      # so that the separable convolution is not overparameterized.
      depthwise_multiplier = min(int(depth(64) / 3), 8)
      net = layers.separable_conv2d(
          inputs, depth(64), [7, 7],
          depth_multiplier=depthwise_multiplier,
          stride=2,
          weights_initializer=trunc_normal(1.0),
          scope=end_point)
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 112 x 112 x 64
      end_point = 'MaxPool_2a_3x3'
      net = layers_lib.max_pool2d(net, [3, 3], scope=end_point, stride=2)
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 56 x 56 x 64
      end_point = 'Conv2d_2b_1x1'
      net = layers.conv2d(net, depth(64), [1, 1], scope=end_point,
                          weights_initializer=trunc_normal(0.1))
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 56 x 56 x 64
      end_point = 'Conv2d_2c_3x3'
      net = layers.conv2d(net, depth(192), [3, 3], scope=end_point)
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 56 x 56 x 192
      end_point = 'MaxPool_3a_3x3'
      net = layers_lib.max_pool2d(net, [3, 3], scope=end_point, stride=2)
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 28 x 28 x 192
      # Inception module.
      end_point = 'Mixed_3b'
      with variable_scope.variable_scope(end_point):
        with variable_scope.variable_scope('Branch_0'):
          branch_0 = layers.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
        with variable_scope.variable_scope('Branch_1'):
          branch_1 = layers.conv2d(net, depth(64), [1, 1],
                                   weights_initializer=trunc_normal(0.09),
                                   scope='Conv2d_0a_1x1')
          branch_1 = layers.conv2d(branch_1, depth(64), [3, 3], scope='Conv2d_0b_3x3')
        with variable_scope.variable_scope('Branch_2'):
          branch_2 = layers.conv2d(net, depth(64), [1, 1],
                                   weights_initializer=trunc_normal(0.09),
                                   scope='Conv2d_0a_1x1')
          branch_2 = layers.conv2d(branch_2, depth(96), [3, 3], scope='Conv2d_0b_3x3')
          branch_2 = layers.conv2d(branch_2, depth(96), [3, 3], scope='Conv2d_0c_3x3')
        with variable_scope.variable_scope('Branch_3'):
          branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = layers.conv2d(branch_3, depth(32), [1, 1],
                                   weights_initializer=trunc_normal(0.1),
                                   scope='Conv2d_0b_1x1')
        net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 28 x 28 x 256
      end_point = 'Mixed_3c'
      with variable_scope.variable_scope(end_point):
        with variable_scope.variable_scope('Branch_0'):
          branch_0 = layers.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
        with variable_scope.variable_scope('Branch_1'):
          branch_1 = layers.conv2d(net, depth(64), [1, 1],
                                   weights_initializer=trunc_normal(0.09),
                                   scope='Conv2d_0a_1x1')
          branch_1 = layers.conv2d(branch_1, depth(96), [3, 3], scope='Conv2d_0b_3x3')
        with variable_scope.variable_scope('Branch_2'):
          branch_2 = layers.conv2d(net, depth(64), [1, 1],
                                   weights_initializer=trunc_normal(0.09),
                                   scope='Conv2d_0a_1x1')
          branch_2 = layers.conv2d(branch_2, depth(96), [3, 3], scope='Conv2d_0b_3x3')
          branch_2 = layers.conv2d(branch_2, depth(96), [3, 3], scope='Conv2d_0c_3x3')
        with variable_scope.variable_scope('Branch_3'):
          branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = layers.conv2d(branch_3, depth(64), [1, 1],
                                   weights_initializer=trunc_normal(0.1),
                                   scope='Conv2d_0b_1x1')
        net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 28 x 28 x 320
      end_point = 'Mixed_4a'
      with variable_scope.variable_scope(end_point):
        with variable_scope.variable_scope('Branch_0'):
          branch_0 = layers.conv2d(net, depth(128), [1, 1],
                                   weights_initializer=trunc_normal(0.09),
                                   scope='Conv2d_0a_1x1')
          branch_0 = layers.conv2d(branch_0, depth(160), [3, 3], stride=2,
                                   scope='Conv2d_1a_3x3')
        with variable_scope.variable_scope('Branch_1'):
          branch_1 = layers.conv2d(net, depth(64), [1, 1],
                                   weights_initializer=trunc_normal(0.09),
                                   scope='Conv2d_0a_1x1')
          branch_1 = layers.conv2d(branch_1, depth(96), [3, 3], scope='Conv2d_0b_3x3')
          branch_1 = layers.conv2d(branch_1, depth(96), [3, 3], stride=2,
                                   scope='Conv2d_1a_3x3')
        with variable_scope.variable_scope('Branch_2'):
          branch_2 = layers_lib.max_pool2d(net, [3, 3], stride=2,
                                           scope='MaxPool_1a_3x3')
        net = array_ops.concat([branch_0, branch_1, branch_2], 3)
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 14 x 14 x 576
      end_point = 'Mixed_4b'
      with variable_scope.variable_scope(end_point):
        with variable_scope.variable_scope('Branch_0'):
          branch_0 = layers.conv2d(net, depth(224), [1, 1], scope='Conv2d_0a_1x1')
        with variable_scope.variable_scope('Branch_1'):
          branch_1 = layers.conv2d(net, depth(64), [1, 1],
                                   weights_initializer=trunc_normal(0.09),
                                   scope='Conv2d_0a_1x1')
          branch_1 = layers.conv2d(branch_1, depth(96), [3, 3], scope='Conv2d_0b_3x3')
        with variable_scope.variable_scope('Branch_2'):
          branch_2 = layers.conv2d(net, depth(96), [1, 1],
                                   weights_initializer=trunc_normal(0.09),
                                   scope='Conv2d_0a_1x1')
          branch_2 = layers.conv2d(branch_2, depth(128), [3, 3], scope='Conv2d_0b_3x3')
          branch_2 = layers.conv2d(branch_2, depth(128), [3, 3], scope='Conv2d_0c_3x3')
        with variable_scope.variable_scope('Branch_3'):
          branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = layers.conv2d(branch_3, depth(128), [1, 1],
                                   weights_initializer=trunc_normal(0.1),
                                   scope='Conv2d_0b_1x1')
        net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 14 x 14 x 576
      end_point = 'Mixed_4c'
      with variable_scope.variable_scope(end_point):
        with variable_scope.variable_scope('Branch_0'):
          branch_0 = layers.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
        with variable_scope.variable_scope('Branch_1'):
          branch_1 = layers.conv2d(net, depth(96), [1, 1],
                                   weights_initializer=trunc_normal(0.09),
                                   scope='Conv2d_0a_1x1')
          branch_1 = layers.conv2d(branch_1, depth(128), [3, 3], scope='Conv2d_0b_3x3')
        with variable_scope.variable_scope('Branch_2'):
          branch_2 = layers.conv2d(net, depth(96), [1, 1],
                                   weights_initializer=trunc_normal(0.09),
                                   scope='Conv2d_0a_1x1')
          branch_2 = layers.conv2d(branch_2, depth(128), [3, 3], scope='Conv2d_0b_3x3')
          branch_2 = layers.conv2d(branch_2, depth(128), [3, 3], scope='Conv2d_0c_3x3')
        with variable_scope.variable_scope('Branch_3'):
          branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = layers.conv2d(branch_3, depth(128), [1, 1],
                                   weights_initializer=trunc_normal(0.1),
                                   scope='Conv2d_0b_1x1')
        net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 14 x 14 x 576
      end_point = 'Mixed_4d'
      with variable_scope.variable_scope(end_point):
        with variable_scope.variable_scope('Branch_0'):
          branch_0 = layers.conv2d(net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
        with variable_scope.variable_scope('Branch_1'):
          branch_1 = layers.conv2d(net, depth(128), [1, 1],
                                   weights_initializer=trunc_normal(0.09),
                                   scope='Conv2d_0a_1x1')
          branch_1 = layers.conv2d(branch_1, depth(160), [3, 3], scope='Conv2d_0b_3x3')
        with variable_scope.variable_scope('Branch_2'):
          branch_2 = layers.conv2d(net, depth(128), [1, 1],
                                   weights_initializer=trunc_normal(0.09),
                                   scope='Conv2d_0a_1x1')
          branch_2 = layers.conv2d(branch_2, depth(160), [3, 3], scope='Conv2d_0b_3x3')
          branch_2 = layers.conv2d(branch_2, depth(160), [3, 3], scope='Conv2d_0c_3x3')
        with variable_scope.variable_scope('Branch_3'):
          branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = layers.conv2d(branch_3, depth(96), [1, 1],
                                   weights_initializer=trunc_normal(0.1),
                                   scope='Conv2d_0b_1x1')
        net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 14 x 14 x 576
      end_point = 'Mixed_4e'
      with variable_scope.variable_scope(end_point):
        with variable_scope.variable_scope('Branch_0'):
          branch_0 = layers.conv2d(net, depth(96), [1, 1], scope='Conv2d_0a_1x1')
        with variable_scope.variable_scope('Branch_1'):
          branch_1 = layers.conv2d(net, depth(128), [1, 1],
                                   weights_initializer=trunc_normal(0.09),
                                   scope='Conv2d_0a_1x1')
          branch_1 = layers.conv2d(branch_1, depth(192), [3, 3], scope='Conv2d_0b_3x3')
        with variable_scope.variable_scope('Branch_2'):
          branch_2 = layers.conv2d(net, depth(160), [1, 1],
                                   weights_initializer=trunc_normal(0.09),
                                   scope='Conv2d_0a_1x1')
          branch_2 = layers.conv2d(branch_2, depth(192), [3, 3], scope='Conv2d_0b_3x3')
          branch_2 = layers.conv2d(branch_2, depth(192), [3, 3], scope='Conv2d_0c_3x3')
        with variable_scope.variable_scope('Branch_3'):
          branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = layers.conv2d(branch_3, depth(96), [1, 1],
                                   weights_initializer=trunc_normal(0.1),
                                   scope='Conv2d_0b_1x1')
        net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 14 x 14 x 576
      end_point = 'Mixed_5a'
      with variable_scope.variable_scope(end_point):
        with variable_scope.variable_scope('Branch_0'):
          branch_0 = layers.conv2d(net, depth(128), [1, 1],
                                   weights_initializer=trunc_normal(0.09),
                                   scope='Conv2d_0a_1x1')
          branch_0 = layers.conv2d(branch_0, depth(192), [3, 3], stride=2,
                                   scope='Conv2d_1a_3x3')
        with variable_scope.variable_scope('Branch_1'):
          branch_1 = layers.conv2d(net, depth(192), [1, 1],
                                   weights_initializer=trunc_normal(0.09),
                                   scope='Conv2d_0a_1x1')
          branch_1 = layers.conv2d(branch_1, depth(256), [3, 3], scope='Conv2d_0b_3x3')
          branch_1 = layers.conv2d(branch_1, depth(256), [3, 3], stride=2,
                                   scope='Conv2d_1a_3x3')
        with variable_scope.variable_scope('Branch_2'):
          branch_2 = layers_lib.max_pool2d(net, [3, 3], stride=2,
                                           scope='MaxPool_1a_3x3')
        net = array_ops.concat([branch_0, branch_1, branch_2], 3)
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 7 x 7 x 1024
      end_point = 'Mixed_5b'
      with variable_scope.variable_scope(end_point):
        with variable_scope.variable_scope('Branch_0'):
          branch_0 = layers.conv2d(net, depth(352), [1, 1], scope='Conv2d_0a_1x1')
        with variable_scope.variable_scope('Branch_1'):
          branch_1 = layers.conv2d(net, depth(192), [1, 1],
                                   weights_initializer=trunc_normal(0.09),
                                   scope='Conv2d_0a_1x1')
          branch_1 = layers.conv2d(branch_1, depth(320), [3, 3], scope='Conv2d_0b_3x3')
        with variable_scope.variable_scope('Branch_2'):
          branch_2 = layers.conv2d(net, depth(160), [1, 1],
                                   weights_initializer=trunc_normal(0.09),
                                   scope='Conv2d_0a_1x1')
          branch_2 = layers.conv2d(branch_2, depth(224), [3, 3], scope='Conv2d_0b_3x3')
          branch_2 = layers.conv2d(branch_2, depth(224), [3, 3], scope='Conv2d_0c_3x3')
        with variable_scope.variable_scope('Branch_3'):
          branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = layers.conv2d(branch_3, depth(128), [1, 1],
                                   weights_initializer=trunc_normal(0.1),
                                   scope='Conv2d_0b_1x1')
        net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 7 x 7 x 1024
      end_point = 'Mixed_5c'
      with variable_scope.variable_scope(end_point):
        with variable_scope.variable_scope('Branch_0'):
          branch_0 = layers.conv2d(net, depth(352), [1, 1], scope='Conv2d_0a_1x1')
        with variable_scope.variable_scope('Branch_1'):
          branch_1 = layers.conv2d(net, depth(192), [1, 1],
                                   weights_initializer=trunc_normal(0.09),
                                   scope='Conv2d_0a_1x1')
          branch_1 = layers.conv2d(branch_1, depth(320), [3, 3], scope='Conv2d_0b_3x3')
        with variable_scope.variable_scope('Branch_2'):
          branch_2 = layers.conv2d(net, depth(192), [1, 1],
                                   weights_initializer=trunc_normal(0.09),
                                   scope='Conv2d_0a_1x1')
          branch_2 = layers.conv2d(branch_2, depth(224), [3, 3], scope='Conv2d_0b_3x3')
          branch_2 = layers.conv2d(branch_2, depth(224), [3, 3], scope='Conv2d_0c_3x3')
        with variable_scope.variable_scope('Branch_3'):
          branch_3 = layers_lib.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
          branch_3 = layers.conv2d(branch_3, depth(128), [1, 1],
                                   weights_initializer=trunc_normal(0.1),
                                   scope='Conv2d_0b_1x1')
        net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      raise ValueError('Unknown final endpoint %s' % final_endpoint)
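

# Illustrative sketch: the base network as a feature extractor, stopping graph
# construction at an intermediate endpoint. Endpoint names follow the list in
# the docstring above; the batch and image sizes are assumptions for the
# example.
def _inception_v2_features_example():
  import tensorflow as tf
  images = tf.placeholder(tf.float32, [4, 224, 224, 3])
  # net is the 14 x 14 x 576 activation of Mixed_4e; end_points also contains
  # every earlier endpoint, so intermediate features can be reused freely.
  net, end_points = inception_v2_base(images, final_endpoint='Mixed_4e')
  return net, end_points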
def inception_v3_base(inputs,
                      final_endpoint='Mixed_7c',
                      min_depth=16,
                      depth_multiplier=1.0,
                      scope=None):
  """Inception model from http://arxiv.org/abs/1512.00567.

  Constructs an Inception v3 network from inputs to the given final endpoint.
  This method can construct the network up to the final inception block
  Mixed_7c.

  Note that the names of the layers in the paper do not correspond to the
  names of the endpoints registered by this function, although they build the
  same network.

  Here is a mapping from the old names to the new names:

    Old name          | New name
    =======================================
    conv0             | Conv2d_1a_3x3
    conv1             | Conv2d_2a_3x3
    conv2             | Conv2d_2b_3x3
    pool1             | MaxPool_3a_3x3
    conv3             | Conv2d_3b_1x1
    conv4             | Conv2d_4a_3x3
    pool2             | MaxPool_5a_3x3
    mixed_35x35x256a  | Mixed_5b
    mixed_35x35x288a  | Mixed_5c
    mixed_35x35x288b  | Mixed_5d
    mixed_17x17x768a  | Mixed_6a
    mixed_17x17x768b  | Mixed_6b
    mixed_17x17x768c  | Mixed_6c
    mixed_17x17x768d  | Mixed_6d
    mixed_17x17x768e  | Mixed_6e
    mixed_8x8x1280a   | Mixed_7a
    mixed_8x8x2048a   | Mixed_7b
    mixed_8x8x2048b   | Mixed_7c

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    final_endpoint: specifies the endpoint to construct the network up to. It
      can be one of ['Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3',
      'MaxPool_3a_3x3', 'Conv2d_3b_1x1', 'Conv2d_4a_3x3', 'MaxPool_5a_3x3',
      'Mixed_5b', 'Mixed_5c', 'Mixed_5d', 'Mixed_6a', 'Mixed_6b', 'Mixed_6c',
      'Mixed_6d', 'Mixed_6e', 'Mixed_7a', 'Mixed_7b', 'Mixed_7c'].
    min_depth: Minimum depth value (number of channels) for all convolution
      ops. Enforced when depth_multiplier < 1, and not an active constraint
      when depth_multiplier >= 1.
    depth_multiplier: Float multiplier for the depth (number of channels) for
      all convolution ops. The value must be greater than zero. Typical usage
      will be to set this value in (0, 1) to reduce the number of parameters
      or computation cost of the model.
    scope: Optional variable_scope.

  Returns:
    tensor_out: output tensor corresponding to the final_endpoint.
    end_points: a set of activations for external use, for example summaries
      or losses.

  Raises:
    ValueError: if final_endpoint is not set to one of the predefined values,
      or depth_multiplier <= 0.
  """
  # end_points will collect relevant activations for external use, for example
  # summaries or losses.
  end_points = {}

  if depth_multiplier <= 0:
    raise ValueError('depth_multiplier is not greater than zero.')

  def depth(d):
    return max(int(d * depth_multiplier), min_depth)

  with variable_scope.variable_scope(scope, 'InceptionV3', [inputs]):
    with arg_scope(
        [layers.conv2d, layers_lib.max_pool2d, layers_lib.avg_pool2d],
        stride=1,
        padding='VALID'):
      # 299 x 299 x 3
      end_point = 'Conv2d_1a_3x3'
      net = layers.conv2d(inputs, depth(32), [3, 3], stride=2, scope=end_point)
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 149 x 149 x 32
      end_point = 'Conv2d_2a_3x3'
      net = layers.conv2d(net, depth(32), [3, 3], scope=end_point)
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 147 x 147 x 32
      end_point = 'Conv2d_2b_3x3'
      net = layers.conv2d(net, depth(64), [3, 3], padding='SAME', scope=end_point)
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 147 x 147 x 64
      end_point = 'MaxPool_3a_3x3'
      net = layers_lib.max_pool2d(net, [3, 3], stride=2, scope=end_point)
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 73 x 73 x 64
      end_point = 'Conv2d_3b_1x1'
      net = layers.conv2d(net, depth(80), [1, 1], scope=end_point)
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 73 x 73 x 80.
      end_point = 'Conv2d_4a_3x3'
      net = layers.conv2d(net, depth(192), [3, 3], scope=end_point)
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 71 x 71 x 192.
      end_point = 'MaxPool_5a_3x3'
      net = layers_lib.max_pool2d(net, [3, 3], stride=2, scope=end_point)
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # 35 x 35 x 192.

    # Inception blocks
    with arg_scope(
        [layers.conv2d, layers_lib.max_pool2d, layers_lib.avg_pool2d],
        stride=1,
        padding='SAME'):
      # mixed: 35 x 35 x 256.
      end_point = 'Mixed_5b'
      with variable_scope.variable_scope(end_point):
        with variable_scope.variable_scope('Branch_0'):
          branch_0 = layers.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
        with variable_scope.variable_scope('Branch_1'):
          branch_1 = layers.conv2d(net, depth(48), [1, 1], scope='Conv2d_0a_1x1')
          branch_1 = layers.conv2d(branch_1, depth(64), [5, 5], scope='Conv2d_0b_5x5')
        with variable_scope.variable_scope('Branch_2'):
          branch_2 = layers.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
          branch_2 = layers.conv2d(branch_2, depth(96), [3, 3], scope='Conv2d_0b_3x3')
          branch_2 = layers.conv2d(branch_2, depth(96), [3, 3], scope='Conv2d_0c_3x3')
        with variable_scope.variable_scope('Branch_3'):
          branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = layers.conv2d(branch_3, depth(32), [1, 1], scope='Conv2d_0b_1x1')
        net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # mixed_1: 35 x 35 x 288.
      end_point = 'Mixed_5c'
      with variable_scope.variable_scope(end_point):
        with variable_scope.variable_scope('Branch_0'):
          branch_0 = layers.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
        with variable_scope.variable_scope('Branch_1'):
          branch_1 = layers.conv2d(net, depth(48), [1, 1], scope='Conv2d_0b_1x1')
          branch_1 = layers.conv2d(branch_1, depth(64), [5, 5], scope='Conv_1_0c_5x5')
        with variable_scope.variable_scope('Branch_2'):
          branch_2 = layers.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
          branch_2 = layers.conv2d(branch_2, depth(96), [3, 3], scope='Conv2d_0b_3x3')
          branch_2 = layers.conv2d(branch_2, depth(96), [3, 3], scope='Conv2d_0c_3x3')
        with variable_scope.variable_scope('Branch_3'):
          branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = layers.conv2d(branch_3, depth(64), [1, 1], scope='Conv2d_0b_1x1')
        net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # mixed_2: 35 x 35 x 288.
      end_point = 'Mixed_5d'
      with variable_scope.variable_scope(end_point):
        with variable_scope.variable_scope('Branch_0'):
          branch_0 = layers.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
        with variable_scope.variable_scope('Branch_1'):
          branch_1 = layers.conv2d(net, depth(48), [1, 1], scope='Conv2d_0a_1x1')
          branch_1 = layers.conv2d(branch_1, depth(64), [5, 5], scope='Conv2d_0b_5x5')
        with variable_scope.variable_scope('Branch_2'):
          branch_2 = layers.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
          branch_2 = layers.conv2d(branch_2, depth(96), [3, 3], scope='Conv2d_0b_3x3')
          branch_2 = layers.conv2d(branch_2, depth(96), [3, 3], scope='Conv2d_0c_3x3')
        with variable_scope.variable_scope('Branch_3'):
          branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = layers.conv2d(branch_3, depth(64), [1, 1], scope='Conv2d_0b_1x1')
        net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # mixed_3: 17 x 17 x 768.
      end_point = 'Mixed_6a'
      with variable_scope.variable_scope(end_point):
        with variable_scope.variable_scope('Branch_0'):
          branch_0 = layers.conv2d(net, depth(384), [3, 3], stride=2,
                                   padding='VALID', scope='Conv2d_1a_1x1')
        with variable_scope.variable_scope('Branch_1'):
          branch_1 = layers.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
          branch_1 = layers.conv2d(branch_1, depth(96), [3, 3], scope='Conv2d_0b_3x3')
          branch_1 = layers.conv2d(branch_1, depth(96), [3, 3], stride=2,
                                   padding='VALID', scope='Conv2d_1a_1x1')
        with variable_scope.variable_scope('Branch_2'):
          branch_2 = layers_lib.max_pool2d(net, [3, 3], stride=2,
                                           padding='VALID', scope='MaxPool_1a_3x3')
        net = array_ops.concat([branch_0, branch_1, branch_2], 3)
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # mixed4: 17 x 17 x 768.
      end_point = 'Mixed_6b'
      with variable_scope.variable_scope(end_point):
        with variable_scope.variable_scope('Branch_0'):
          branch_0 = layers.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
        with variable_scope.variable_scope('Branch_1'):
          branch_1 = layers.conv2d(net, depth(128), [1, 1], scope='Conv2d_0a_1x1')
          branch_1 = layers.conv2d(branch_1, depth(128), [1, 7], scope='Conv2d_0b_1x7')
          branch_1 = layers.conv2d(branch_1, depth(192), [7, 1], scope='Conv2d_0c_7x1')
        with variable_scope.variable_scope('Branch_2'):
          branch_2 = layers.conv2d(net, depth(128), [1, 1], scope='Conv2d_0a_1x1')
          branch_2 = layers.conv2d(branch_2, depth(128), [7, 1], scope='Conv2d_0b_7x1')
          branch_2 = layers.conv2d(branch_2, depth(128), [1, 7], scope='Conv2d_0c_1x7')
          branch_2 = layers.conv2d(branch_2, depth(128), [7, 1], scope='Conv2d_0d_7x1')
          branch_2 = layers.conv2d(branch_2, depth(192), [1, 7], scope='Conv2d_0e_1x7')
        with variable_scope.variable_scope('Branch_3'):
          branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = layers.conv2d(branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')
        net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # mixed_5: 17 x 17 x 768.
      end_point = 'Mixed_6c'
      with variable_scope.variable_scope(end_point):
        with variable_scope.variable_scope('Branch_0'):
          branch_0 = layers.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
        with variable_scope.variable_scope('Branch_1'):
          branch_1 = layers.conv2d(net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
          branch_1 = layers.conv2d(branch_1, depth(160), [1, 7], scope='Conv2d_0b_1x7')
          branch_1 = layers.conv2d(branch_1, depth(192), [7, 1], scope='Conv2d_0c_7x1')
        with variable_scope.variable_scope('Branch_2'):
          branch_2 = layers.conv2d(net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
          branch_2 = layers.conv2d(branch_2, depth(160), [7, 1], scope='Conv2d_0b_7x1')
          branch_2 = layers.conv2d(branch_2, depth(160), [1, 7], scope='Conv2d_0c_1x7')
          branch_2 = layers.conv2d(branch_2, depth(160), [7, 1], scope='Conv2d_0d_7x1')
          branch_2 = layers.conv2d(branch_2, depth(192), [1, 7], scope='Conv2d_0e_1x7')
        with variable_scope.variable_scope('Branch_3'):
          branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = layers.conv2d(branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')
        net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # mixed_6: 17 x 17 x 768.
      end_point = 'Mixed_6d'
      with variable_scope.variable_scope(end_point):
        with variable_scope.variable_scope('Branch_0'):
          branch_0 = layers.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
        with variable_scope.variable_scope('Branch_1'):
          branch_1 = layers.conv2d(net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
          branch_1 = layers.conv2d(branch_1, depth(160), [1, 7], scope='Conv2d_0b_1x7')
          branch_1 = layers.conv2d(branch_1, depth(192), [7, 1], scope='Conv2d_0c_7x1')
        with variable_scope.variable_scope('Branch_2'):
          branch_2 = layers.conv2d(net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
          branch_2 = layers.conv2d(branch_2, depth(160), [7, 1], scope='Conv2d_0b_7x1')
          branch_2 = layers.conv2d(branch_2, depth(160), [1, 7], scope='Conv2d_0c_1x7')
          branch_2 = layers.conv2d(branch_2, depth(160), [7, 1], scope='Conv2d_0d_7x1')
          branch_2 = layers.conv2d(branch_2, depth(192), [1, 7], scope='Conv2d_0e_1x7')
        with variable_scope.variable_scope('Branch_3'):
          branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = layers.conv2d(branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')
        net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # mixed_7: 17 x 17 x 768.
      end_point = 'Mixed_6e'
      with variable_scope.variable_scope(end_point):
        with variable_scope.variable_scope('Branch_0'):
          branch_0 = layers.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
        with variable_scope.variable_scope('Branch_1'):
          branch_1 = layers.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
          branch_1 = layers.conv2d(branch_1, depth(192), [1, 7], scope='Conv2d_0b_1x7')
          branch_1 = layers.conv2d(branch_1, depth(192), [7, 1], scope='Conv2d_0c_7x1')
        with variable_scope.variable_scope('Branch_2'):
          branch_2 = layers.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
          branch_2 = layers.conv2d(branch_2, depth(192), [7, 1], scope='Conv2d_0b_7x1')
          branch_2 = layers.conv2d(branch_2, depth(192), [1, 7], scope='Conv2d_0c_1x7')
          branch_2 = layers.conv2d(branch_2, depth(192), [7, 1], scope='Conv2d_0d_7x1')
          branch_2 = layers.conv2d(branch_2, depth(192), [1, 7], scope='Conv2d_0e_1x7')
        with variable_scope.variable_scope('Branch_3'):
          branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = layers.conv2d(branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')
        net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # mixed_8: 8 x 8 x 1280.
      end_point = 'Mixed_7a'
      with variable_scope.variable_scope(end_point):
        with variable_scope.variable_scope('Branch_0'):
          branch_0 = layers.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
          branch_0 = layers.conv2d(branch_0, depth(320), [3, 3], stride=2,
                                   padding='VALID', scope='Conv2d_1a_3x3')
        with variable_scope.variable_scope('Branch_1'):
          branch_1 = layers.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
          branch_1 = layers.conv2d(branch_1, depth(192), [1, 7], scope='Conv2d_0b_1x7')
          branch_1 = layers.conv2d(branch_1, depth(192), [7, 1], scope='Conv2d_0c_7x1')
          branch_1 = layers.conv2d(branch_1, depth(192), [3, 3], stride=2,
                                   padding='VALID', scope='Conv2d_1a_3x3')
        with variable_scope.variable_scope('Branch_2'):
          branch_2 = layers_lib.max_pool2d(net, [3, 3], stride=2,
                                           padding='VALID', scope='MaxPool_1a_3x3')
        net = array_ops.concat([branch_0, branch_1, branch_2], 3)
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # mixed_9: 8 x 8 x 2048.
      end_point = 'Mixed_7b'
      with variable_scope.variable_scope(end_point):
        with variable_scope.variable_scope('Branch_0'):
          branch_0 = layers.conv2d(net, depth(320), [1, 1], scope='Conv2d_0a_1x1')
        with variable_scope.variable_scope('Branch_1'):
          branch_1 = layers.conv2d(net, depth(384), [1, 1], scope='Conv2d_0a_1x1')
          branch_1 = array_ops.concat(
              [layers.conv2d(branch_1, depth(384), [1, 3], scope='Conv2d_0b_1x3'),
               layers.conv2d(branch_1, depth(384), [3, 1], scope='Conv2d_0b_3x1')], 3)
        with variable_scope.variable_scope('Branch_2'):
          branch_2 = layers.conv2d(net, depth(448), [1, 1], scope='Conv2d_0a_1x1')
          branch_2 = layers.conv2d(branch_2, depth(384), [3, 3], scope='Conv2d_0b_3x3')
          branch_2 = array_ops.concat(
              [layers.conv2d(branch_2, depth(384), [1, 3], scope='Conv2d_0c_1x3'),
               layers.conv2d(branch_2, depth(384), [3, 1], scope='Conv2d_0d_3x1')], 3)
        with variable_scope.variable_scope('Branch_3'):
          branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = layers.conv2d(branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')
        net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      # mixed_10: 8 x 8 x 2048.
      end_point = 'Mixed_7c'
      with variable_scope.variable_scope(end_point):
        with variable_scope.variable_scope('Branch_0'):
          branch_0 = layers.conv2d(net, depth(320), [1, 1], scope='Conv2d_0a_1x1')
        with variable_scope.variable_scope('Branch_1'):
          branch_1 = layers.conv2d(net, depth(384), [1, 1], scope='Conv2d_0a_1x1')
          branch_1 = array_ops.concat(
              [layers.conv2d(branch_1, depth(384), [1, 3], scope='Conv2d_0b_1x3'),
               layers.conv2d(branch_1, depth(384), [3, 1], scope='Conv2d_0c_3x1')], 3)
        with variable_scope.variable_scope('Branch_2'):
          branch_2 = layers.conv2d(net, depth(448), [1, 1], scope='Conv2d_0a_1x1')
          branch_2 = layers.conv2d(branch_2, depth(384), [3, 3], scope='Conv2d_0b_3x3')
          branch_2 = array_ops.concat(
              [layers.conv2d(branch_2, depth(384), [1, 3], scope='Conv2d_0c_1x3'),
               layers.conv2d(branch_2, depth(384), [3, 1], scope='Conv2d_0d_3x1')], 3)
        with variable_scope.variable_scope('Branch_3'):
          branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = layers.conv2d(branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')
        net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
      end_points[end_point] = net
      if end_point == final_endpoint: return net, end_points
      raise ValueError('Unknown final endpoint %s' % final_endpoint)
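

# Worked example of the depth() thinning rule shared by the functions above:
# depth(d) = max(int(d * depth_multiplier), min_depth). With
# depth_multiplier=0.25 and min_depth=16, a nominal 32-channel conv keeps 16
# channels (the min_depth floor binds), while a 448-channel conv thins to 112.
def _depth_example(d, depth_multiplier=0.25, min_depth=16):
  return max(int(d * depth_multiplier), min_depth)

assert _depth_example(32) == 16    # 32 * 0.25 = 8, lifted to min_depth.
assert _depth_example(448) == 112  # 448 * 0.25 = 112, floor not binding.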