Example #1
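This example exercises MorphNet's GammaModelSizeRegularizer together with a regularizer decorator. Helpers such as _coeff, self.loss, self.cost, self.get_conv, and the constant NUM_CHANNELS come from the surrounding test class and are not shown here.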
    def testLossCostDecorated(self):
        params = {
            'trainable': True,
            'normalizer_fn': slim.batch_norm,
            'normalizer_params': {
                'scale': True
            }
        }

        # Build a 1x1 convolution whose batch norm (scale=True) supplies the
        # gamma variables that the gamma-based regularizer reads.
        with slim.arg_scope([slim.layers.conv2d], **params):
            image = tf.constant(0.0, shape=[1, 1, 1, NUM_CHANNELS])
            conv1 = slim.layers.conv2d(image,
                                       2, [1, 1],
                                       padding='SAME',
                                       scope='conv1')
        # Initialize variables and set both gammas to 1.0, above the
        # gamma_threshold of 0.1, so both output channels count as alive.
        with self.cached_session():
            tf.global_variables_initializer().run()
            name_to_var = {v.op.name: v for v in tf.global_variables()}
            gamma1 = name_to_var['conv1/BatchNorm/gamma']
            gamma1.assign([1] * 2).eval()

        # Build the model-size regularizer, wrapping each op regularizer in
        # DummyDecorator, which scales the regularization vector by 0.5.
        self.gamma_flop_reg = model_size_regularizer.GammaModelSizeRegularizer(
            [conv1.op],
            gamma_threshold=0.1,
            regularizer_decorator=dummy_decorator.DummyDecorator,
            decorator_parameters={'scale': 0.5})

        # The decorator halves the regularization loss, while the cost
        # (2 alive output channels x NUM_CHANNELS inputs) is unaffected.
        conv = self.get_conv('conv1')
        self.assertEqual(_coeff(conv) * 3 * 1, self.loss([conv]))
        self.assertEqual(_coeff(conv) * 2 * NUM_CHANNELS, self.cost([conv]))
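For reference, a regularizer decorator wraps each per-op regularizer that MorphNet creates. Below is a minimal sketch of the idea, assuming MorphNet's OpRegularizer interface (a regularization_vector and an alive_vector property); ScalingDecorator is a hypothetical name for illustration and mirrors what the DummyDecorator used above appears to do.

class ScalingDecorator(object):
  """Scales the penalty of the OpRegularizer it wraps."""

  def __init__(self, regularizer_to_decorate, scale):
    self._regularizer = regularizer_to_decorate
    self._scale = scale

  @property
  def regularization_vector(self):
    # The per-channel penalty is scaled; with scale=0.5 the reported
    # regularization loss is halved.
    return self._scale * self._regularizer.regularization_vector

  @property
  def alive_vector(self):
    # Alive/dead decisions pass through unchanged, so get_cost() values
    # are unaffected by the decorator.
    return self._regularizer.alive_vector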
Example #2
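This example builds the Inception V2 base under a slim arg_scope, attaches four MorphNet network regularizers (FLOPs, P100 latency, V100 latency, and model size) to the logits op, and checks each reported cost against a reference value.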
  def testInceptionV2_TotalCost(self):
    conv_params = {
        'activation_fn': tf.nn.relu6,
        'weights_regularizer': contrib_layers.l2_regularizer(0.00004),
        'weights_initializer': tf.random_normal_initializer(stddev=0.03),
        'trainable': True,
        'biases_initializer': tf.constant_initializer(0.0),
        'normalizer_fn': contrib_layers.batch_norm,
        'normalizer_params': {
            'is_training': False,
            'decay': 0.9997,
            'scale': True,
            'epsilon': 0.001,
        }
    }

    tf.reset_default_graph()
    with slim.arg_scope([slim.layers.conv2d, slim.layers.separable_conv2d],
                        **conv_params):
      # Build model.
      image = tf.zeros([1, 224, 224, 3])
      net, _ = inception.inception_v2_base(image)
      logits = slim.layers.fully_connected(
          net,
          1001,
          activation_fn=None,
          scope='logits',
          weights_initializer=tf.random_normal_initializer(stddev=1e-3),
          biases_initializer=tf.constant_initializer(0.0))

    # Instantiate regularizers.
    flop_reg = flop_regularizer.GammaFlopsRegularizer(
        [logits.op], gamma_threshold=0.5)
    p100_reg = latency_regularizer.GammaLatencyRegularizer(
        [logits.op], gamma_threshold=0.5, hardware='P100')
    v100_reg = latency_regularizer.GammaLatencyRegularizer(
        [logits.op], gamma_threshold=0.5, hardware='V100')
    model_size_reg = model_size_regularizer.GammaModelSizeRegularizer(
        [logits.op], gamma_threshold=0.5)

    with self.cached_session():
      tf.global_variables_initializer().run()

    # Verify costs are expected.
    self.assertAllClose(3.86972e+09, flop_reg.get_cost())
    self.assertAllClose(517536.0, p100_reg.get_cost())
    self.assertAllClose(173330.453125, v100_reg.get_cost())
    self.assertAllClose(1.11684e+07, model_size_reg.get_cost())
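In training (rather than in a test), the regularizer's differentiable penalty is added to the task loss while get_cost() is monitored. The following is a minimal sketch, assuming the tf_slim and morph_net packages; the toy model, dummy labels, and regularization strength are illustrative assumptions, not values from the tests above.

import tensorflow.compat.v1 as tf
import tf_slim as slim
from morph_net.network_regularizers import flop_regularizer

tf.disable_v2_behavior()

images = tf.zeros([8, 32, 32, 3])
labels = tf.zeros([8], dtype=tf.int32)  # dummy labels, illustration only

# Batch norm with scale=True provides the gammas the regularizer needs.
net = slim.conv2d(images, 16, [3, 3],
                  normalizer_fn=slim.batch_norm,
                  normalizer_params={'scale': True},
                  scope='conv1')
net = slim.flatten(net)
logits = slim.fully_connected(net, 10, activation_fn=None, scope='logits')

network_regularizer = flop_regularizer.GammaFlopsRegularizer(
    [logits.op], gamma_threshold=0.1)
regularization_strength = 1e-9  # assumed hyperparameter, tuned per model

task_loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=labels, logits=logits))
# get_regularization_term() is the differentiable penalty; get_cost()
# reports the current estimated FLOP cost of the network.
total_loss = (task_loss + regularization_strength *
              network_regularizer.get_regularization_term())
train_op = tf.train.MomentumOptimizer(0.01, 0.9).minimize(total_loss)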