  def test_reuse_model(self, policy, is_training):
    config = saccader_config.get_config()
    num_times = 2
    image_shape = (224, 224, 3)
    num_classes = 10
    config.num_classes = num_classes
    config.num_times = num_times
    batch_size = 3
    images1 = tf.constant(np.random.rand(*((batch_size,) + image_shape)),
                          dtype=tf.float32)
    model = saccader.Saccader(config)
    logits1 = model(images1,
                    num_times=num_times,
                    is_training=is_training,
                    policy=policy)[0]
    num_params = len(tf.global_variables())
    l2_loss1 = tf.losses.get_regularization_loss()
    # Build the model a second time with a different num_times and batch size.
    images2 = tf.constant(np.random.rand(*((batch_size - 1,) + image_shape)),
                          dtype=tf.float32)
    logits2 = model(images2,
                    num_times=num_times + 1,
                    is_training=is_training,
                    policy=policy)[0]
    l2_loss2 = tf.losses.get_regularization_loss()

    # Ensure variables are reused (the second build adds no new variables).
    self.assertLen(tf.global_variables(), num_params)
    init = tf.global_variables_initializer()
    self.evaluate(init)
    logits1, logits2 = self.evaluate((logits1, logits2))
    l2_loss1, l2_loss2 = self.evaluate((l2_loss1, l2_loss2))
    # Reused variables imply an unchanged regularization loss.
    np.testing.assert_almost_equal(l2_loss1, l2_loss2, decimal=9)
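
These methods take policy and is_training as arguments, so the surrounding test class is presumably parameterized. A minimal harness sketch, assuming absl.testing.parameterized and illustrative policy names ("learned", "random"); the module paths and the exact parameter grid are assumptions, not taken from the repo:

import itertools

import numpy as np
import tensorflow as tf  # These snippets use TF 1.x graph-mode APIs.
from absl.testing import parameterized

from saccader import losses
from saccader import saccader
from saccader import saccader_config


class SaccaderTest(tf.test.TestCase, parameterized.TestCase):

  # Runs each test once per (policy, is_training) combination.
  @parameterized.parameters(
      itertools.product(["learned", "random"], [True, False]))
  def test_build(self, policy, is_training):
    ...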
  def test_locations(self, policy, is_training):
    config = saccader_config.get_config()
    num_times = 2
    image_shape = (224, 224, 3)
    num_classes = 10
    config.num_classes = num_classes
    config.num_times = num_times
    batch_size = 4
    images = tf.constant(np.random.rand(*((batch_size,) + image_shape)),
                         dtype=tf.float32)
    model = saccader.Saccader(config)
    _, locations_t, _, _ = model(images,
                                 num_times=num_times,
                                 is_training=is_training,
                                 policy=policy)
    init_op = model.init_op
    self.evaluate(init_op)
    locations_t_ = self.evaluate(locations_t)
    # Locations should be different across time.
    self.assertNotAlmostEqual(
        np.abs(locations_t_[0] - locations_t_[1]).mean(), 0.)
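
The assertion above only compares the first two timesteps. A stricter check over every timestep pair is a straightforward extension; a sketch, assuming locations_t_ evaluates to a sequence of per-timestep location arrays as in the test above (the helper name is illustrative):

import itertools

def assert_locations_distinct(locations_t_):
  # Every pair of timesteps should differ in mean absolute location.
  for i, j in itertools.combinations(range(len(locations_t_)), 2):
    assert np.abs(locations_t_[i] - locations_t_[j]).mean() > 0., (i, j)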
Example #3
  def test_build(self, policy, is_training):
   config = saccader_config.get_config()
   num_times = 2
   image_shape = (224, 224, 3)
   num_classes = 10
   config.num_classes = num_classes
   config.num_times = num_times
   batch_size = 3
   images = tf.constant(
       np.random.rand(*((batch_size,) + image_shape)), dtype=tf.float32)
   model = saccader.Saccader(config)
   logits = model(images, num_times=num_times, is_training=is_training,
                  policy=policy)[0]
   init_op = model.init_op
   self.evaluate(init_op)
   self.assertEqual((batch_size, num_classes),
                    self.evaluate(logits).shape)
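
test_build only uses element [0] of the model's output, while test_locations unpacks four values. A sketch of the full unpacking with shape comments, assuming the tuple layout implied by these tests (the last two names are placeholders, not necessarily the repo's):

logits, locations_t, best_locations_t, endpoints = model(
    images, num_times=num_times, is_training=is_training, policy=policy)
# logits: [batch_size, num_classes] classification scores.
# locations_t: per-timestep glimpse locations, indexed as locations_t[0],
# locations_t[1], ... in test_locations above.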
Example #4
  def test_saccader_pretrain_loss(self):
    batch_size = 2
    num_classes = 1001
    images = tf.random_uniform(
        shape=(batch_size, 224, 224, 3), minval=-1, maxval=1, dtype=tf.float32)
    config = saccader_config.get_config()
    config.num_classes = num_classes

    model = saccader.Saccader(config)

    model(
        images,
        num_times=6,
        is_training=True,
        policy="learned")

    num_params = len(tf.global_variables())

    pretrain_loss = losses.saccader_pretraining_loss(
        model, images, is_training=True)

    # Test loss does not introduce new variables.
    self.assertLen(tf.global_variables(), num_params)

    # Gradients with respect to location variables should exist.
    for v, g in zip(model.var_list_location, tf.gradients(
        pretrain_loss, model.var_list_location)):
      if v.trainable:
        self.assertIsNotNone(g)

    # Gradients with respect to classification variables should be None.
    for g in tf.gradients(
        pretrain_loss, model.var_list_classification):
      self.assertIsNone(g)

    # Test that the loss evaluates without error.
    self.evaluate(tf.global_variables_initializer())
    self.evaluate(pretrain_loss)
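
Because gradients from this loss flow only to the location variables, a pretraining step can restrict the optimizer to model.var_list_location. A minimal sketch under that assumption, using a stock TF 1.x optimizer (the learning rate and momentum values are arbitrary):

optimizer = tf.train.MomentumOptimizer(learning_rate=0.01, momentum=0.9)
# Classification variables receive no updates; only the location pathway
# trains during pretraining.
pretrain_op = optimizer.minimize(
    pretrain_loss, var_list=model.var_list_location)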