def inference(input_image,
              num_classes,
              for_training=False,
              restore_logits=True,
              scope=None):
    batch_norm_params = {
        # Decay for the moving averages.
        'decay': BATCHNORM_MOVING_AVERAGE_DECAY,
        # epsilon to prevent 0s in variance.
        'epsilon': 0.001,
    }
    # Set weight_decay for weights in Conv and FC layers.
    with slim.arg_scope([slim.ops.conv2d, slim.ops.fc], weight_decay=0.00004):
        with slim.arg_scope([slim.ops.conv2d],
                            stddev=0.1,
                            activation=tf.nn.relu,
                            batch_norm_params=batch_norm_params):
            logits, endpoints = slim.inception.inception_v3(
                input_image,
                dropout_keep_prob=0.8,
                num_classes=num_classes,
                is_training=for_training,
                restore_logits=restore_logits,
                scope=scope)
            predictions = endpoints['predictions']

    return logits, predictions
Example #2
def inception_resnet_v2_arg_scope(weight_decay=0.00004,
                                  batch_norm_decay=0.9997,
                                  batch_norm_epsilon=0.001,
                                  activation_fn=tf.nn.relu):
    """Returns the scope with the default parameters for inception_resnet_v2.

  Args:
    weight_decay: the weight decay for weights variables.
    batch_norm_decay: decay for the moving averages of batch norm.
    batch_norm_epsilon: small float added to variance to avoid dividing by zero.
    activation_fn: Activation function for conv2d.

  Returns:
    An arg_scope with the parameters needed for inception_resnet_v2.
  """
    # Set weight_decay for weights in conv2d and fully_connected layers.
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        weights_regularizer=slim.l2_regularizer(weight_decay),
                        biases_regularizer=slim.l2_regularizer(weight_decay)):

        batch_norm_params = {
            'decay': batch_norm_decay,
            'epsilon': batch_norm_epsilon,
            'fused': None,  # Use fused batch norm if possible.
        }
        # Set activation_fn and parameters for batch_norm.
        with slim.arg_scope([slim.conv2d],
                            activation_fn=activation_fn,
                            normalizer_fn=slim.batch_norm,
                            normalizer_params=batch_norm_params) as scope:
            return scope
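
A hedged usage sketch (not part of the original snippet): the returned scope wraps the network builder, assuming TF 1.x with tf.contrib.slim and the TF-Slim inception_resnet_v2 builder on the import path.

# Usage sketch; `inception_resnet_v2` is assumed importable from the TF-Slim
# model library (e.g. `from nets.inception_resnet_v2 import inception_resnet_v2`).
import tensorflow as tf
slim = tf.contrib.slim

images = tf.placeholder(tf.float32, [None, 299, 299, 3])
with slim.arg_scope(inception_resnet_v2_arg_scope()):
    # conv2d/fully_connected calls inside the builder inherit the
    # regularizers, activation, and batch-norm settings set above.
    logits, end_points = inception_resnet_v2(images, num_classes=1001,
                                             is_training=False)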
Example #3
def alexnet_v2_arg_scope(weight_decay=0.0005):
  with slim.arg_scope([slim.conv2d, slim.fully_connected],
                      activation_fn=tf.nn.relu,
                      biases_initializer=tf.constant_initializer(0.1),
                      weights_regularizer=slim.l2_regularizer(weight_decay)):
    with slim.arg_scope([slim.conv2d], padding='SAME'):
      with slim.arg_scope([slim.max_pool2d], padding='VALID') as arg_sc:
        return arg_sc
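
The same pattern applies here; a minimal sketch, assuming the TF-Slim alexnet_v2 builder is importable:

# Usage sketch; `alexnet_v2` is assumed importable from the TF-Slim model
# library (`from nets.alexnet import alexnet_v2`).
import tensorflow as tf
slim = tf.contrib.slim

inputs = tf.placeholder(tf.float32, [None, 224, 224, 3])
with slim.arg_scope(alexnet_v2_arg_scope()):
    net, end_points = alexnet_v2(inputs, num_classes=1000)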
Example #4
def overfeat_arg_scope(weight_decay=0.0005):
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        activation_fn=tf.nn.relu,
                        weights_regularizer=slim.l2_regularizer(weight_decay),
                        biases_initializer=tf.zeros_initializer()):
        with slim.arg_scope([slim.conv2d], padding='SAME'):
            with slim.arg_scope([slim.max_pool2d], padding='VALID') as arg_sc:
                return arg_sc
Example #5
def inference(images,
              num_classes,
              for_training=False,
              restore_logits=True,
              scope=None):
    """Build Inception v3 model architecture.

  See here for reference: http://arxiv.org/abs/1512.00567

  Args:
    images: Images returned from inputs() or distorted_inputs().
    num_classes: number of classes
    for_training: If set to `True`, build the inference model for training.
      Kernels that operate differently for inference during training,
      e.g. dropout, are appropriately configured.
    restore_logits: whether or not the logits layers should be restored.
      Useful for fine-tuning a model with different num_classes.
    scope: optional prefix string identifying the ImageNet tower.

  Returns:
    Logits. 2-D float Tensor.
    Auxiliary Logits. 2-D float Tensor of side-head. Used for training only.
  """
    # Parameters for BatchNorm.
    batch_norm_params = {
        # Decay for the moving averages.
        'decay': BATCHNORM_MOVING_AVERAGE_DECAY,
        # epsilon to prevent 0s in variance.
        'epsilon': 0.001,
    }
    # Set weight_decay for weights in Conv and FC layers.
    with slim.arg_scope([slim.ops.conv2d, slim.ops.fc], weight_decay=0.00004):
        with slim.arg_scope([slim.ops.conv2d],
                            stddev=0.1,
                            activation=tf.nn.relu,
                            batch_norm_params=batch_norm_params):
            logits, endpoints = slim.inception.inception_v3(
                images,
                dropout_keep_prob=0.8,
                num_classes=num_classes,
                is_training=for_training,
                restore_logits=restore_logits,
                scope=scope)

    # Add summaries for viewing model statistics on TensorBoard.
    _activation_summaries(endpoints)

    # Grab the logits associated with the side head. Employed during training.
    auxiliary_logits = endpoints['aux_logits']

    return logits, auxiliary_logits
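
A hedged calling sketch: in the original pipeline `images` comes from inputs() or distorted_inputs(); a placeholder stands in for them here.

# Calling sketch; BATCHNORM_MOVING_AVERAGE_DECAY, slim, and
# _activation_summaries are module-level names assumed to be defined
# alongside inference() in the original training script.
images = tf.placeholder(tf.float32, [32, 299, 299, 3])
logits, aux_logits = inference(images, num_classes=1001, for_training=True)
# Both heads feed the training loss; aux_logits is used during training only.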
Example #6
def resnet_arg_scope(weight_decay=0.0001,
                     batch_norm_decay=0.997,
                     batch_norm_epsilon=1e-5,
                     batch_norm_scale=True,
                     activation_fn=tf.nn.relu,
                     use_batch_norm=True):
  """Defines the default ResNet arg scope.

  TODO(gpapan): The batch-normalization related default values above are
    appropriate for use in conjunction with the reference ResNet models
    released at https://github.com/KaimingHe/deep-residual-networks. When
    training ResNets from scratch, they might need to be tuned.

  Args:
    weight_decay: The weight decay to use for regularizing the model.
    batch_norm_decay: The moving average decay when estimating layer activation
      statistics in batch normalization.
    batch_norm_epsilon: Small constant to prevent division by zero when
      normalizing activations by their variance in batch normalization.
    batch_norm_scale: If True, uses an explicit `gamma` multiplier to scale the
      activations in the batch normalization layer.
    activation_fn: The activation function which is used in ResNet.
    use_batch_norm: Whether or not to use batch normalization.

  Returns:
    An `arg_scope` to use for the resnet models.
  """
  batch_norm_params = {
      'decay': batch_norm_decay,
      'epsilon': batch_norm_epsilon,
      'scale': batch_norm_scale,
      'updates_collections': tf.GraphKeys.UPDATE_OPS,
      'fused': None,  # Use fused batch norm if possible.
  }

  with slim.arg_scope(
      [slim.conv2d],
      weights_regularizer=slim.l2_regularizer(weight_decay),
      weights_initializer=slim.variance_scaling_initializer(),
      activation_fn=activation_fn,
      normalizer_fn=slim.batch_norm if use_batch_norm else None,
      normalizer_params=batch_norm_params):
    with slim.arg_scope([slim.batch_norm], **batch_norm_params):
      # The following implies padding='SAME' for pool1, which makes feature
      # alignment easier for dense prediction tasks. This is also used in
      # https://github.com/facebook/fb.resnet.torch. However the accompanying
      # code of 'Deep Residual Learning for Image Recognition' uses
      # padding='VALID' for pool1. You can switch to that choice by setting
      # slim.arg_scope([slim.max_pool2d], padding='VALID').
      with slim.arg_scope([slim.max_pool2d], padding='SAME') as arg_sc:
        return arg_sc
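
A minimal usage sketch, assuming the TF-Slim resnet_v1 builder is importable:

# Usage sketch; `resnet_v1` comes from the TF-Slim model library.
import tensorflow as tf
slim = tf.contrib.slim
from nets import resnet_v1

inputs = tf.placeholder(tf.float32, [None, 224, 224, 3])
with slim.arg_scope(resnet_arg_scope()):
    net, end_points = resnet_v1.resnet_v1_50(inputs, num_classes=1000,
                                             is_training=False)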
Example #7
def vgg_arg_scope(weight_decay=0.0005):
    """Defines the VGG arg scope.

  Args:
    weight_decay: The l2 regularization coefficient.

  Returns:
    An arg_scope.
  """
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        activation_fn=tf.nn.relu,
                        weights_regularizer=slim.l2_regularizer(weight_decay),
                        biases_initializer=tf.zeros_initializer()):
        with slim.arg_scope([slim.conv2d], padding='SAME') as arg_sc:
            return arg_sc
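
A minimal usage sketch, assuming the TF-Slim vgg builder is importable:

# Usage sketch; `vgg` comes from the TF-Slim model library.
import tensorflow as tf
slim = tf.contrib.slim
from nets import vgg

inputs = tf.placeholder(tf.float32, [None, 224, 224, 3])
with slim.arg_scope(vgg_arg_scope()):
    net, end_points = vgg.vgg_16(inputs, num_classes=1000, is_training=False)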
Example #8
 def testAtrousFullyConvolutionalValues(self):
   """Verify dense feature extraction with atrous convolution."""
   nominal_stride = 32
   for output_stride in [4, 8, 16, 32, None]:
     with slim.arg_scope(resnet_utils.resnet_arg_scope()):
       with tf.Graph().as_default():
         with self.test_session() as sess:
           tf.set_random_seed(0)
           inputs = create_test_input(2, 81, 81, 3)
           # Dense feature extraction followed by subsampling.
           output, _ = self._resnet_small(inputs, None, is_training=False,
                                          global_pool=False,
                                          output_stride=output_stride)
           if output_stride is None:
             factor = 1
           else:
             factor = nominal_stride // output_stride
           output = resnet_utils.subsample(output, factor)
           # Make the two networks use the same weights.
           tf.get_variable_scope().reuse_variables()
           # Feature extraction at the nominal network rate.
           expected, _ = self._resnet_small(inputs, None, is_training=False,
                                            global_pool=False)
           sess.run(tf.global_variables_initializer())
           self.assertAllClose(output.eval(), expected.eval(),
                               atol=1e-4, rtol=1e-4)
Example #9
 def testEndPointsV1(self):
   """Test the end points of a tiny v1 bottleneck network."""
   blocks = [
       resnet_v1.resnet_v1_block(
           'block1', base_depth=1, num_units=2, stride=2),
       resnet_v1.resnet_v1_block(
           'block2', base_depth=2, num_units=2, stride=1),
   ]
   inputs = create_test_input(2, 32, 16, 3)
   with slim.arg_scope(resnet_utils.resnet_arg_scope()):
     _, end_points = self._resnet_plain(inputs, blocks, scope='tiny')
   expected = [
       'tiny/block1/unit_1/bottleneck_v1/shortcut',
       'tiny/block1/unit_1/bottleneck_v1/conv1',
       'tiny/block1/unit_1/bottleneck_v1/conv2',
       'tiny/block1/unit_1/bottleneck_v1/conv3',
       'tiny/block1/unit_2/bottleneck_v1/conv1',
       'tiny/block1/unit_2/bottleneck_v1/conv2',
       'tiny/block1/unit_2/bottleneck_v1/conv3',
       'tiny/block2/unit_1/bottleneck_v1/shortcut',
       'tiny/block2/unit_1/bottleneck_v1/conv1',
       'tiny/block2/unit_1/bottleneck_v1/conv2',
       'tiny/block2/unit_1/bottleneck_v1/conv3',
       'tiny/block2/unit_2/bottleneck_v1/conv1',
       'tiny/block2/unit_2/bottleneck_v1/conv2',
       'tiny/block2/unit_2/bottleneck_v1/conv3']
   self.assertItemsEqual(expected, end_points)
Example #10
 def _resnet_plain(self, inputs, blocks, output_stride=None, scope=None):
   """A plain ResNet without extra layers before or after the ResNet blocks."""
   with tf.variable_scope(scope, values=[inputs]):
     with slim.arg_scope([slim.conv2d], outputs_collections='end_points'):
       net = resnet_utils.stack_blocks_dense(inputs, blocks, output_stride)
       end_points = slim.utils.convert_collection_to_dict('end_points')
       return net, end_points
Example #11
def inference(images,
              keep_probability,
              phase_train=True,
              bottleneck_layer_size=128,
              weight_decay=0.0,
              reuse=None):
    batch_norm_params = {
        # Decay for the moving averages.
        'decay': 0.995,
        # epsilon to prevent 0s in variance.
        'epsilon': 0.001,
        # force in-place updates of mean and variance estimates
        'updates_collections': None,
        # Moving averages end up in the trainable variables collection
        'variables_collections': [tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES],
    }

    with slim.arg_scope(
        [slim.conv2d, slim.fully_connected],
            weights_initializer=tf.compat.v1.truncated_normal_initializer(
                stddev=0.1),
            weights_regularizer=tf.keras.regularizers.l2(0.5 * (weight_decay)),
            normalizer_fn=slim.batch_norm,
            normalizer_params=batch_norm_params):
        return inception_resnet_v1(images,
                                   is_training=phase_train,
                                   dropout_keep_prob=keep_probability,
                                   bottleneck_layer_size=bottleneck_layer_size,
                                   reuse=reuse)
Example #12
def gan_discriminator(images1, images2, reuse=False):
    wd = 0

    images = tf.concat(3, [images1, images2])
    net = images

    with tf.variable_scope('discriminator'):

        with slim.arg_scope([slim.ops.conv2d], stddev=0.1, weight_decay=wd, is_training=True):

            if reuse:
                tf.get_variable_scope().reuse_variables()

            net = slim.ops.repeat_op(1, net, slim.ops.conv2d, 32, [3, 3], batch_norm_params={}, scope='conv1')
            net = slim.ops.max_pool(net, [2, 2], scope='pool1')

            net = slim.ops.repeat_op(1, net, slim.ops.conv2d, 64, [3, 3], batch_norm_params={}, scope='conv2')
            net = slim.ops.max_pool(net, [2, 2], scope='pool2')

            net = slim.ops.repeat_op(1, net, slim.ops.conv2d, 128, [3, 3], batch_norm_params={}, scope='conv3')
            net = slim.ops.max_pool(net, [2, 2], scope='pool3')

            net = slim.ops.repeat_op(1, net, slim.ops.conv2d, 256, [3, 3], batch_norm_params={}, scope='conv4')
            net = slim.ops.max_pool(net, [2, 2], scope='pool4')

            net = slim.ops.repeat_op(1, net, slim.ops.conv2d, 1, [3, 3], activation=None, scope='conv5')

            net = tf.reduce_mean(net, reduction_indices=[1, 2, 3], name='reduce')
            # net = tf.nn.sigmoid(net)

    return net
Example #13
def block_reduction_a(inputs, scope=None, reuse=None):
    """Builds Reduction-A block for Inception v4 network."""
    # By default use stride=1 and SAME padding
    with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d],
                        stride=1,
                        padding='SAME'):
        with tf.variable_scope(scope, 'BlockReductionA', [inputs],
                               reuse=reuse):
            with tf.variable_scope('Branch_0'):
                branch_0 = slim.conv2d(inputs,
                                       384, [3, 3],
                                       stride=2,
                                       padding='VALID',
                                       scope='Conv2d_1a_3x3')
            with tf.variable_scope('Branch_1'):
                branch_1 = slim.conv2d(inputs,
                                       192, [1, 1],
                                       scope='Conv2d_0a_1x1')
                branch_1 = slim.conv2d(branch_1,
                                       224, [3, 3],
                                       scope='Conv2d_0b_3x3')
                branch_1 = slim.conv2d(branch_1,
                                       256, [3, 3],
                                       stride=2,
                                       padding='VALID',
                                       scope='Conv2d_1a_3x3')
            with tf.variable_scope('Branch_2'):
                branch_2 = slim.max_pool2d(inputs, [3, 3],
                                           stride=2,
                                           padding='VALID',
                                           scope='MaxPool_1a_3x3')
            return tf.concat(axis=3, values=[branch_0, branch_1, branch_2])
Example #14
def build_net(images1, images2, is_training=True):
    images1, images2 = normalize_images(images1, images2)
    images = tf.concat(3, [images1, images2])

    wd = 0

    with slim.arg_scope([slim.ops.conv2d], stddev=0.01, weight_decay=wd, is_training=is_training):
        net = slim.ops.repeat_op(1, images, slim.ops.conv2d, 48, [3, 3], scope='conv1')
        net = slim.ops.max_pool(net, [2, 2], scope='pool1')
        net = tf.nn.lrn(net, name='lrn1')

        net = slim.ops.repeat_op(1, net, slim.ops.conv2d, 64, [3, 3], scope='conv2')
        net = slim.ops.max_pool(net, [2, 2], scope='pool2')
        net = tf.nn.lrn(net, name='lrn2')

        net = slim.ops.repeat_op(1, net, slim.ops.conv2d, 128, [3, 3], scope='conv3')
        net = slim.ops.max_pool(net, [2, 2], scope='pool3')
        net = tf.nn.lrn(net, name='lrn3')

        net = slim.ops.repeat_op(1, net, slim.ops.conv2d, 256, [3, 3], scope='conv4')
        net = slim.ops.max_pool(net, [2, 2], scope='pool4')
        net = tf.nn.lrn(net, name='lrn4')

        net = slim.ops.repeat_op(1, net, slim.ops.conv2d, 2, [3, 3], activation=None, scope='conv5')

        net = tf.reduce_mean(net, reduction_indices=[1, 2], name="reduce")
        net = tf.nn.softmax(net, name="softmax")

    return net
Example #15
 def testEndpointNames(self):
     # Like ResnetUtilsTest.testEndPointsV2(), but for the public API.
     global_pool = True
     num_classes = 10
     inputs = create_test_input(2, 224, 224, 3)
     with slim.arg_scope(resnet_utils.resnet_arg_scope()):
         _, end_points = self._resnet_small(inputs,
                                            num_classes,
                                            global_pool=global_pool,
                                            scope='resnet')
     expected = ['resnet/conv1']
     for block in range(1, 5):
         for unit in range(1, 4 if block < 4 else 3):
             for conv in range(1, 4):
                 expected.append(
                     'resnet/block%d/unit_%d/bottleneck_v2/conv%d' %
                     (block, unit, conv))
             expected.append('resnet/block%d/unit_%d/bottleneck_v2' %
                             (block, unit))
         expected.append('resnet/block%d/unit_1/bottleneck_v2/shortcut' %
                         block)
         expected.append('resnet/block%d' % block)
     expected.extend([
         'global_pool', 'resnet/logits', 'resnet/spatial_squeeze',
         'predictions'
     ])
     self.assertItemsEqual(end_points.keys(), expected)
Example #16
    def testAtrousValuesBottleneck(self):
        """Verify the values of dense feature extraction by atrous convolution.

    Make sure that dense feature extraction by stack_blocks_dense() followed by
    subsampling gives identical results to feature extraction at the nominal
    network output stride using the simple self._stack_blocks_nondense() above.
    """
        block = resnet_v2.resnet_v2_block
        blocks = [
            block('block1', base_depth=1, num_units=2, stride=2),
            block('block2', base_depth=2, num_units=2, stride=2),
            block('block3', base_depth=4, num_units=2, stride=2),
            block('block4', base_depth=8, num_units=2, stride=1),
        ]
        nominal_stride = 8

        # Test both odd and even input dimensions.
        height = 30
        width = 31
        with slim.arg_scope(resnet_utils.resnet_arg_scope()):
            with slim.arg_scope([slim.batch_norm], is_training=False):
                for output_stride in [1, 2, 4, 8, None]:
                    with tf.Graph().as_default():
                        with self.test_session() as sess:
                            tf.set_random_seed(0)
                            inputs = create_test_input(1, height, width, 3)
                            # Dense feature extraction followed by subsampling.
                            output = resnet_utils.stack_blocks_dense(
                                inputs, blocks, output_stride)
                            if output_stride is None:
                                factor = 1
                            else:
                                factor = nominal_stride // output_stride

                            output = resnet_utils.subsample(output, factor)
                            # Make the two networks use the same weights.
                            tf.get_variable_scope().reuse_variables()
                            # Feature extraction at the nominal network rate.
                            expected = self._stack_blocks_nondense(
                                inputs, blocks)
                            sess.run(tf.global_variables_initializer())
                            output, expected = sess.run([output, expected])
                            self.assertAllClose(output,
                                                expected,
                                                atol=1e-4,
                                                rtol=1e-4)
Example #17
 def testModelHasExpectedNumberOfParameters(self):
     batch_size = 5
     height, width = 299, 299
     inputs = tf.random_uniform((batch_size, height, width, 3))
     with slim.arg_scope(inception.inception_v3_arg_scope()):
         inception.inception_v3_base(inputs)
     total_params, _ = slim.model_analyzer.analyze_vars(
         slim.get_model_variables())
     self.assertAlmostEqual(21802784, total_params)
Example #18
def inception_arg_scope(weight_decay=0.00004,
                        use_batch_norm=True,
                        batch_norm_decay=0.9997,
                        batch_norm_epsilon=0.001,
                        activation_fn=tf.nn.relu):
    """Defines the default arg scope for inception models.

  Args:
    weight_decay: The weight decay to use for regularizing the model.
    use_batch_norm: If `True`, batch_norm is applied after each convolution.
    batch_norm_decay: Decay for batch norm moving average.
    batch_norm_epsilon: Small float added to variance to avoid dividing by zero
      in batch norm.
    activation_fn: Activation function for conv2d.

  Returns:
    An `arg_scope` to use for the inception models.
  """
    batch_norm_params = {
        # Decay for the moving averages.
        'decay': batch_norm_decay,
        # epsilon to prevent 0s in variance.
        'epsilon': batch_norm_epsilon,
        # collection containing update_ops.
        'updates_collections': tf.GraphKeys.UPDATE_OPS,
        # use fused batch norm if possible.
        'fused': None,
    }
    if use_batch_norm:
        normalizer_fn = slim.batch_norm
        normalizer_params = batch_norm_params
    else:
        normalizer_fn = None
        normalizer_params = {}
    # Set weight_decay for weights in Conv and FC layers.
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        weights_regularizer=slim.l2_regularizer(weight_decay)):
        with slim.arg_scope(
            [slim.conv2d],
                weights_initializer=slim.variance_scaling_initializer(),
                activation_fn=activation_fn,
                normalizer_fn=normalizer_fn,
                normalizer_params=normalizer_params) as sc:
            return sc
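
A minimal usage sketch, assuming the TF-Slim inception_v3 builder is importable:

# Usage sketch; `inception` comes from the TF-Slim model library.
import tensorflow as tf
slim = tf.contrib.slim
from nets import inception

images = tf.placeholder(tf.float32, [None, 299, 299, 3])
with slim.arg_scope(inception_arg_scope()):
    logits, end_points = inception.inception_v3(images, num_classes=1001,
                                                is_training=False)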
Example #19
 def testVariablesSetDeviceMobileModel(self):
     batch_size = 5
     height, width = 224, 224
     num_classes = 1000
     inputs = tf.random_uniform((batch_size, height, width, 3))
     tf.train.create_global_step()
     # Force all Variables to reside on the device.
     with tf.variable_scope('on_cpu'), tf.device('/cpu:0'):
         with slim.arg_scope(nasnet.nasnet_mobile_arg_scope()):
             nasnet.build_nasnet_mobile(inputs, num_classes)
     with tf.variable_scope('on_gpu'), tf.device('/gpu:0'):
         with slim.arg_scope(nasnet.nasnet_mobile_arg_scope()):
             nasnet.build_nasnet_mobile(inputs, num_classes)
     for v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                scope='on_cpu'):
         self.assertDeviceEqual(v.device, '/cpu:0')
     for v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                scope='on_gpu'):
         self.assertDeviceEqual(v.device, '/gpu:0')
Example #20
 def testModelHasExpectedNumberOfParameters(self):
     batch_size = 5
     height, width = 224, 224
     inputs = tf.random_uniform((batch_size, height, width, 3))
     with slim.arg_scope([slim.conv2d, slim.separable_conv2d],
                         normalizer_fn=slim.batch_norm):
         mobilenet_v1.mobilenet_v1_base(inputs)
         total_params, _ = slim.model_analyzer.analyze_vars(
             slim.get_model_variables())
         self.assertAlmostEqual(3217920, total_params)
Example #21
def cifarnet_arg_scope(weight_decay=0.004):
  """Defines the default cifarnet argument scope.

  Args:
    weight_decay: The weight decay to use for regularizing the model.

  Returns:
    An `arg_scope` to use for the cifarnet model.
  """
  with slim.arg_scope(
      [slim.conv2d],
      weights_initializer=tf.truncated_normal_initializer(stddev=5e-2),
      activation_fn=tf.nn.relu):
    with slim.arg_scope(
        [slim.fully_connected],
        biases_initializer=tf.constant_initializer(0.1),
        weights_initializer=trunc_normal(0.04),
        weights_regularizer=slim.l2_regularizer(weight_decay),
        activation_fn=tf.nn.relu) as sc:
      return sc
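
Note that trunc_normal above is a module-level helper in the TF-Slim nets package (a shorthand for tf.truncated_normal_initializer). A hedged usage sketch, assuming the TF-Slim cifarnet builder is importable:

# Usage sketch; `cifarnet` comes from the TF-Slim model library.
import tensorflow as tf
slim = tf.contrib.slim
from nets import cifarnet

images = tf.placeholder(tf.float32, [None, 32, 32, 3])
with slim.arg_scope(cifarnet_arg_scope()):
    logits, end_points = cifarnet.cifarnet(images, num_classes=10)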
Example #22
def mobilenet_v1_arg_scope(is_training=True,
                           weight_decay=0.00004,
                           stddev=0.09,
                           regularize_depthwise=False):
    """Defines the default MobilenetV1 arg scope.

  Args:
    is_training: Whether or not we're training the model.
    weight_decay: The weight decay to use for regularizing the model.
    stddev: The standard deviation of the truncated normal weight initializer.
    regularize_depthwise: Whether or not apply regularization on depthwise.

  Returns:
    An `arg_scope` to use for the mobilenet v1 model.
  """
    batch_norm_params = {
        'is_training': is_training,
        'center': True,
        'scale': True,
        'decay': 0.9997,
        'epsilon': 0.001,
    }

    # Set weight_decay for weights in Conv and DepthSepConv layers.
    weights_init = tf.truncated_normal_initializer(stddev=stddev)
    regularizer = tf.contrib.layers.l2_regularizer(weight_decay)
    if regularize_depthwise:
        depthwise_regularizer = regularizer
    else:
        depthwise_regularizer = None
    with slim.arg_scope([slim.conv2d, slim.separable_conv2d],
                        weights_initializer=weights_init,
                        activation_fn=tf.nn.relu6,
                        normalizer_fn=slim.batch_norm):
        with slim.arg_scope([slim.batch_norm], **batch_norm_params):
            with slim.arg_scope([slim.conv2d],
                                weights_regularizer=regularizer):
                with slim.arg_scope(
                    [slim.separable_conv2d],
                        weights_regularizer=depthwise_regularizer) as sc:
                    return sc
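
Unlike the other scopes above, this one bakes is_training into the batch-norm parameters, so the flag should agree between the scope and the builder. A hedged sketch, assuming the TF-Slim mobilenet_v1 builder is importable:

# Usage sketch; `mobilenet_v1` comes from the TF-Slim model library.
import tensorflow as tf
slim = tf.contrib.slim
from nets import mobilenet_v1

inputs = tf.placeholder(tf.float32, [None, 224, 224, 3])
with slim.arg_scope(mobilenet_v1_arg_scope(is_training=False)):
    logits, end_points = mobilenet_v1.mobilenet_v1(inputs, num_classes=1001,
                                                   is_training=False)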
Example #23
 def testBuildPreLogitsMobileModel(self):
     batch_size = 5
     height, width = 224, 224
     num_classes = None
     inputs = tf.random_uniform((batch_size, height, width, 3))
     tf.train.create_global_step()
     with slim.arg_scope(nasnet.nasnet_mobile_arg_scope()):
         net, end_points = nasnet.build_nasnet_mobile(inputs, num_classes)
     self.assertFalse('AuxLogits' in end_points)
     self.assertFalse('Predictions' in end_points)
     self.assertTrue(net.op.name.startswith('final_layer/Mean'))
     self.assertListEqual(net.get_shape().as_list(), [batch_size, 1056])
Example #24
def create_clones(config, model_fn, args=None, kwargs=None):
    """Creates multiple clones according to config using a `model_fn`.

  The returned values of `model_fn(*args, **kwargs)` are collected along with
  the scope and device used to create it in a namedtuple
  `Clone(outputs, scope, device)`

  Note: it is assumed that any loss created by `model_fn` is collected at
  the tf.GraphKeys.LOSSES collection.

  To recover the losses, summaries or update_ops created by the clone use:
  ```python
    losses = tf.get_collection(tf.GraphKeys.LOSSES, clone.scope)
    summaries = tf.get_collection(tf.GraphKeys.SUMMARIES, clone.scope)
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, clone.scope)
  ```

  The deployment options are specified by the config object and support
  deploying one or several clones on different GPUs and one or several replicas
  of such clones.

  The argument `model_fn` is called `config.num_clones` times to create the
  model clones as `model_fn(*args, **kwargs)`.

  If `config` specifies deployment on multiple replicas then the default
  tensorflow device is set appropriately for each call to `model_fn` and for the
  slim variable creation functions: model and global variables will be created
  on the `ps` device, the clone operations will be on the `worker` device.

  Args:
    config: A DeploymentConfig object.
    model_fn: A callable. Called as `model_fn(*args, **kwargs)`
    args: Optional list of arguments to pass to `model_fn`.
    kwargs: Optional dict of keyword arguments to pass to `model_fn`.

  Returns:
    A list of namedtuples `Clone`.
  """
    clones = []
    args = args or []
    kwargs = kwargs or {}
    with slim.arg_scope([slim.model_variable, slim.variable],
                        device=config.variables_device()):
        # Create clones.
        for i in range(0, config.num_clones):
            with tf.name_scope(config.clone_scope(i)) as clone_scope:
                clone_device = config.clone_device(i)
                with tf.device(clone_device):
                    with tf.variable_scope(tf.get_variable_scope(),
                                           reuse=True if i > 0 else None):
                        outputs = model_fn(*args, **kwargs)
                    clones.append(Clone(outputs, clone_scope, clone_device))
    return clones
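
A hedged usage sketch modeled on slim's model_deploy module; DeploymentConfig, my_network, images, and labels are illustrative assumptions, not part of this snippet:

# Usage sketch; my_network is a hypothetical builder, and images/labels are
# assumed to come from an input pipeline created on config.inputs_device().
config = DeploymentConfig(num_clones=2)  # e.g. two clones, one per GPU

def model_fn(images, labels):
    logits = my_network(images)
    # Losses created here land in tf.GraphKeys.LOSSES, as create_clones expects.
    tf.losses.softmax_cross_entropy(labels, logits)

clones = create_clones(config, model_fn, args=[images, labels])
first_clone_losses = tf.get_collection(tf.GraphKeys.LOSSES, clones[0].scope)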
Example #25
def block_inception_c(inputs, scope=None, reuse=None):
    """Builds Inception-C block for Inception v4 network."""
    # By default use stride=1 and SAME padding
    with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d],
                        stride=1,
                        padding='SAME'):
        with tf.variable_scope(scope, 'BlockInceptionC', [inputs],
                               reuse=reuse):
            with tf.variable_scope('Branch_0'):
                branch_0 = slim.conv2d(inputs,
                                       256, [1, 1],
                                       scope='Conv2d_0a_1x1')
            with tf.variable_scope('Branch_1'):
                branch_1 = slim.conv2d(inputs,
                                       384, [1, 1],
                                       scope='Conv2d_0a_1x1')
                branch_1 = tf.concat(axis=3,
                                     values=[
                                         slim.conv2d(branch_1,
                                                     256, [1, 3],
                                                     scope='Conv2d_0b_1x3'),
                                         slim.conv2d(branch_1,
                                                     256, [3, 1],
                                                     scope='Conv2d_0c_3x1')
                                     ])
            with tf.variable_scope('Branch_2'):
                branch_2 = slim.conv2d(inputs,
                                       384, [1, 1],
                                       scope='Conv2d_0a_1x1')
                branch_2 = slim.conv2d(branch_2,
                                       448, [3, 1],
                                       scope='Conv2d_0b_3x1')
                branch_2 = slim.conv2d(branch_2,
                                       512, [1, 3],
                                       scope='Conv2d_0c_1x3')
                branch_2 = tf.concat(axis=3,
                                     values=[
                                         slim.conv2d(branch_2,
                                                     256, [1, 3],
                                                     scope='Conv2d_0d_1x3'),
                                         slim.conv2d(branch_2,
                                                     256, [3, 1],
                                                     scope='Conv2d_0e_3x1')
                                     ])
            with tf.variable_scope('Branch_3'):
                branch_3 = slim.avg_pool2d(inputs, [3, 3],
                                           scope='AvgPool_0a_3x3')
                branch_3 = slim.conv2d(branch_3,
                                       256, [1, 1],
                                       scope='Conv2d_0b_1x1')
            return tf.concat(axis=3,
                             values=[branch_0, branch_1, branch_2, branch_3])
Example #26
def test(dataset, checkpoint_file, result_path, config=None):
    """Test one sequence
	Args:
	dataset: Reference to a Dataset object instance
	checkpoint_path: Path of the checkpoint to use for the evaluation
	result_path: Path to save the output images
	config: Reference to a Configuration object used in the creation of a Session
	Returns:
	"""
    if config is None:
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        # config.log_device_placement = True
        config.allow_soft_placement = True
    tf.logging.set_verbosity(tf.logging.INFO)

    # Input data
    batch_size = 1
    input_image = tf.placeholder(tf.float32, [batch_size, None, None, 3])

    # Create the cnn
    with slim.arg_scope(osvos_arg_scope()):
        net, end_points = osvos(input_image)
    probabilities = tf.nn.sigmoid(net)
    global_step = tf.Variable(0, name='global_step', trainable=False)

    # Create a saver to load the network
    saver = tf.train.Saver([
        v for v in tf.global_variables()
        if '-up' not in v.name and '-cr' not in v.name
    ])

    with tf.Session(config=config) as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(interp_surgery(tf.global_variables()))
        saver.restore(sess, checkpoint_file)
        if not os.path.exists(result_path):
            os.makedirs(result_path)
        for frame in range(0, dataset.get_test_size()):
            img, curr_img = dataset.next_batch(batch_size, 'test')
            curr_frame = curr_img[0].split('/')[-1].split('.')[0] + '.png'
            image = preprocess_img(img[0])
            res = sess.run(probabilities, feed_dict={input_image: image})
            res_np = res.astype(np.float32)[0, :, :, 0] > 162.0 / 255.0
            # scipy.misc.imsave(os.path.join(result_path, curr_frame), res_np.astype(np.float32))
            # For windows
            scipy.misc.imsave(
                os.path.join(result_path,
                             curr_frame.split("\\")[-1]),
                res_np.astype(np.float32))
            print('Saving ' + os.path.join(result_path, curr_frame))
Example #27
 def testUnknownBatchSizeMobileModel(self):
     batch_size = 1
     height, width = 224, 224
     num_classes = 1000
     with self.test_session() as sess:
         inputs = tf.placeholder(tf.float32, (None, height, width, 3))
         with slim.arg_scope(nasnet.nasnet_mobile_arg_scope()):
             logits, _ = nasnet.build_nasnet_mobile(inputs, num_classes)
         self.assertListEqual(logits.get_shape().as_list(),
                              [None, num_classes])
         images = tf.random_uniform((batch_size, height, width, 3))
         sess.run(tf.global_variables_initializer())
         output = sess.run(logits, {inputs: images.eval()})
         self.assertEquals(output.shape, (batch_size, num_classes))
Example #28
 def testEvaluationMobileModel(self):
     batch_size = 2
     height, width = 224, 224
     num_classes = 1000
     with self.test_session() as sess:
         eval_inputs = tf.random_uniform((batch_size, height, width, 3))
         with slim.arg_scope(nasnet.nasnet_mobile_arg_scope()):
             logits, _ = nasnet.build_nasnet_mobile(eval_inputs,
                                                    num_classes,
                                                    is_training=False)
         predictions = tf.argmax(logits, 1)
         sess.run(tf.global_variables_initializer())
         output = sess.run(predictions)
         self.assertEquals(output.shape, (batch_size, ))
Example #29
 def testFullyConvolutionalUnknownHeightWidth(self):
   batch = 2
   height, width = 65, 65
   global_pool = False
   inputs = create_test_input(batch, None, None, 3)
   with slim.arg_scope(resnet_utils.resnet_arg_scope()):
     output, _ = self._resnet_small(inputs, None, global_pool=global_pool)
   self.assertListEqual(output.get_shape().as_list(),
                        [batch, None, None, 32])
   images = create_test_input(batch, height, width, 3)
   with self.test_session() as sess:
     sess.run(tf.global_variables_initializer())
     output = sess.run(output, {inputs: images.eval()})
     self.assertEqual(output.shape, (batch, 3, 3, 32))
Example #30
 def testClassificationShapes(self):
   global_pool = True
   num_classes = 10
   inputs = create_test_input(2, 224, 224, 3)
   with slim.arg_scope(resnet_utils.resnet_arg_scope()):
     _, end_points = self._resnet_small(inputs, num_classes,
                                        global_pool=global_pool,
                                        scope='resnet')
     endpoint_to_shape = {
         'resnet/block1': [2, 28, 28, 4],
         'resnet/block2': [2, 14, 14, 8],
         'resnet/block3': [2, 7, 7, 16],
         'resnet/block4': [2, 7, 7, 32]}
     for endpoint in endpoint_to_shape:
       shape = endpoint_to_shape[endpoint]
       self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
Example #31
 def testBuildLogitsLargeModel(self):
     batch_size = 5
     height, width = 331, 331
     num_classes = 1000
     inputs = tf.random_uniform((batch_size, height, width, 3))
     tf.train.create_global_step()
     with slim.arg_scope(nasnet.nasnet_large_arg_scope()):
         logits, end_points = nasnet.build_nasnet_large(inputs, num_classes)
     auxlogits = end_points['AuxLogits']
     predictions = end_points['Predictions']
     self.assertListEqual(auxlogits.get_shape().as_list(),
                          [batch_size, num_classes])
     self.assertListEqual(logits.get_shape().as_list(),
                          [batch_size, num_classes])
     self.assertListEqual(predictions.get_shape().as_list(),
                          [batch_size, num_classes])
Example #32
def osvos_arg_scope(weight_decay=0.0002):
    """Defines the OSVOS arg scope.
	Args:
	weight_decay: The l2 regularization coefficient.
	Returns:
	An arg_scope.
	"""
    with slim.arg_scope(
        [slim.conv2d, slim.convolution2d_transpose],
            activation_fn=tf.nn.relu,
            weights_initializer=tf.random_normal_initializer(stddev=0.001),
            weights_regularizer=slim.l2_regularizer(weight_decay),
            biases_initializer=tf.zeros_initializer(),
            biases_regularizer=None,
            padding='SAME') as arg_sc:
        return arg_sc
Example #33
def gan_generator(images):
    wd = 0

    net = images

    with tf.variable_scope('generator'):

        with slim.arg_scope([slim.ops.conv2d, deconv2d], stddev=0.1, weight_decay=wd,
                            is_training=True):

            net = conv1 = slim.ops.conv2d(net, 32, [3, 3], batch_norm_params={}, scope='conv1')
            net = pool1 = slim.ops.max_pool(net, [2, 2], scope='pool1')

            net = conv2 = slim.ops.conv2d(net, 64, [3, 3], batch_norm_params={}, scope='conv2')
            net = pool2 = slim.ops.max_pool(net, [2, 2], scope='pool2')

            net = conv3 = slim.ops.conv2d(net, 128, [3, 3], batch_norm_params={}, scope='conv3')
            net = pool3 = slim.ops.max_pool(net, [2, 2], scope='pool3')

            net = conv4 = slim.ops.conv2d(net, 256, [3, 3], batch_norm_params={}, scope='conv4')
            # net = pool4 = slim.ops.max_pool(net, [2, 2], scope='pool4')

            # net = conv5 = slim.ops.conv2d(net, 128, [3, 3], batch_norm_params={}, scope='conv5')
            # net = pool5 = slim.ops.max_pool(net, [2, 2], scope='pool5')

            # print net.get_shape()

            # net = deconv2d(net, [3, 3], conv4.get_shape(), batch_norm_params={}, scope='deconv5')
            # print net.get_shape()

            # net = tf.concat(3, [net, conv4], name='concat4')
            net = deconv2d(net, [3, 3], conv3.get_shape(), batch_norm_params={}, scope='deconv4')
            # print net.get_shape()

            net = tf.concat(3, [net, conv3], name='concat3')
            net = deconv2d(net, [3, 3], conv2.get_shape(), batch_norm_params={}, scope='deconv3')
            # print net.get_shape()

            net = tf.concat(3, [net, conv2], name='concat2')
            net = deconv2d(net, [3, 3], images.get_shape(), scope='deconv2')
            # print net.get_shape()

            # net = tf.concat(3, [net, pool1], name='concat1')
            # net = deconv2d(net, [3, 3], images.get_shape(), activation=None, scope='deconv1')
            # print net.get_shape()

    return net