Code Example #1
def saccader_pretraining_loss(model, images, is_training):
    """Saccader pretraining loss.

  Args:
    model: Callable saccader model object.
    images: (4D Tensor) input images.
    is_training: (Boolen) training or inference mode.

  Returns:
    Pretraining loss for the model location weights.
  """
    _, _, _, endpoints = model(images,
                               num_times=12,
                               is_training=is_training,
                               policy="learned",
                               stop_gradient_after_representation=True)

    location_scale = endpoints["location_scale"]
    logits2d = endpoints["logits2d"]
    locations_logits2d_t = endpoints["locations_logits2d_t"]
    batch_size, height, width, _ = logits2d.shape.as_list()
    num_times = len(locations_logits2d_t)
    target_locations_t = saccader.engineered_policies(
        images,
        logits2d,
        utils.position_channels(logits2d) * location_scale,
        model.glimpse_shape,
        num_times,
        policy="ordered_logits")

    one_hot_t = []
    for loc in target_locations_t:
        one_hot_t.append(
            tf.reshape(
                utils.onehot2d(logits2d,
                               tf.stop_gradient(loc) / location_scale),
                (batch_size, height * width)))

    locations_logits_t = [
        tf.reshape(locations_logits2d_t[t], (batch_size, height * width))
        for t in range(num_times)
    ]
    pretrain_loss = tf.reduce_mean(
        tf.losses.softmax_cross_entropy(onehot_labels=tf.concat(one_hot_t,
                                                                axis=0),
                                        logits=tf.concat(locations_logits_t,
                                                         axis=0),
                                        loss_collection=None,
                                        reduction=tf.losses.Reduction.NONE))
    return pretrain_loss
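
A minimal sketch of how the loss above might be wired into a TF1-style pretraining step, assuming the saccader repo modules used above are importable. Because the model is called with stop_gradient_after_representation=True, gradients only reach the location-policy weights; the optimizer choice and learning rate here are illustrative assumptions, not taken from the Saccader codebase.

import tensorflow.compat.v1 as tf


def build_pretrain_step(model, images, learning_rate=1e-3):
    # Pretraining loss for the location weights (see saccader_pretraining_loss above).
    loss = saccader_pretraining_loss(model, images, is_training=True)
    # Variables behind the representation stop_gradient receive no gradient and
    # are left unchanged, so a plain minimize() only updates the location policy.
    optimizer = tf.train.AdamOptimizer(learning_rate)
    train_op = optimizer.minimize(loss)
    return loss, train_op
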
Code Example #2
    def test_onehot2d(self):
        locations = [[-1, -1], [-1, 1], [1, -1], [1, 1]]
        batch_size = len(locations)
        tensor = _construct_images(batch_size)

        onehot = self.evaluate(
            utils.onehot2d(tensor,
                           offsets=tf.constant(locations, dtype=tf.float32)))

        h, w = tensor.shape.as_list()[1:3]
        for i, loc in enumerate(locations):
            # Check that the corner element equals 1.
            ix_h = 0 if loc[0] == -1 else (h - 1)
            ix_w = 0 if loc[1] == -1 else (w - 1)
            self.assertEqual(onehot[i, ix_h, ix_w, 0], 1)

            # Check that the 2D sum equals 1.
            self.assertEqual(onehot[i].sum(), 1)
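
A rough NumPy reference for the index mapping this test relies on: utils.onehot2d appears to map an offset in [-1, 1] x [-1, 1] to the nearest cell of the H x W grid, so (-1, -1) selects the top-left element and (1, 1) the bottom-right. The helper below is an illustrative reimplementation under that assumption, not the library code.

import numpy as np


def onehot2d_reference(height, width, offsets):
    """offsets: float array of shape [batch, 2] with values in [-1, 1]."""
    offsets = np.asarray(offsets, dtype=np.float32)
    batch = offsets.shape[0]
    onehot = np.zeros((batch, height, width, 1), dtype=np.float32)
    # Map offset -1 to index 0 and offset +1 to index (size - 1).
    rows = np.round((offsets[:, 0] + 1.) / 2. * (height - 1)).astype(int)
    cols = np.round((offsets[:, 1] + 1.) / 2. * (width - 1)).astype(int)
    onehot[np.arange(batch), rows, cols, 0] = 1.
    return onehot
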
Code Example #3
    def __call__(self,
                 mixed_features2d,
                 cell_state,
                 logits2d,
                 is_training=False,
                 policy="learned"):
        """Builds Saccader cell.

    Args:
      mixed_features2d: 4-D Tensor of shape [batch, height, width, channels].
      cell_state: 4-D Tensor of shape [batch, height, width, 1] with cell state.
      logits2d: 4-D Tensor of shape [batch, height, width, channels].
      is_training: (Boolean) To indicate training or inference modes.
      policy: (String) 'learned': uses learned policy, 'random': uses random
        policy, or 'center': uses center look policy.
    Returns:
      logits: Model logits.
      cell_state: New cell state.
      endpoints: Dictionary with cell parameters.
    """
        batch_size, height, width, channels = mixed_features2d.shape.as_list()
        reuse = bool(self.var_list)
        position_channels = utils.position_channels(mixed_features2d)

        variables_before = set(tf.global_variables())
        with tf.variable_scope("saccader_cell", reuse=reuse):
            # Compute 2D weights of features across space.
            features_space_logits = tf.layers.dense(
                mixed_features2d,
                units=1,
                use_bias=False,
                name="attention_weights") / tf.math.sqrt(float(channels))

            features_space_logits += cell_state * -1.e5  # Mask used locations.
            features_space_weights = utils.softmax2d(features_space_logits)

            # Compute 1D weights of features across channels.
            features_channels_logits = tf.reduce_sum(mixed_features2d *
                                                     features_space_weights,
                                                     axis=[1, 2])
            features_channels_weights = tf.nn.softmax(features_channels_logits,
                                                      axis=1)

            # Compute location probability.
            locations_logits2d = tf.reduce_sum(
                (mixed_features2d *
                 features_channels_weights[:, tf.newaxis, tf.newaxis, :]),
                axis=-1,
                keepdims=True)

            locations_logits2d += (cell_state * -1e5)  # Mask used locations.
            locations_prob2d = utils.softmax2d(locations_logits2d)

        variables_after = set(tf.global_variables())
        # Compute best locations.
        locations_logits = tf.reshape(locations_logits2d, (batch_size, -1))
        all_positions = tf.reshape(position_channels,
                                   [batch_size, height * width, 2])

        best_locations_labels = tf.argmax(locations_logits, axis=-1)
        best_locations = utils.batch_gather_nd(all_positions,
                                               best_locations_labels,
                                               axis=1)

        # Sample locations.
        if policy == "learned":
            if is_training:
                dist = tfp.distributions.Categorical(logits=locations_logits)
                locations_labels = dist.sample()
                # At training time, sample the location from the learned distribution.
                locations = utils.batch_gather_nd(all_positions,
                                                  locations_labels,
                                                  axis=1)
                # Ensure locations stay in the range [-1., 1.].
                locations = tf.clip_by_value(locations, -1., 1)
                tf.logging.info("Sampling locations.")
                tf.logging.info(
                    "==================================================")
            else:
                # At inference time, use the most likely (argmax) location.
                locations = best_locations
                locations_labels = best_locations_labels
        elif policy == "random":
            # Use random policy for location.
            locations = tf.random_uniform(shape=(batch_size, 2),
                                          minval=-1.,
                                          maxval=1.)
            locations_labels = None
        elif policy == "center":
            # Use center look policy.
            locations = tf.zeros(shape=(batch_size, 2))
            locations_labels = None

        # Update cell_state.
        cell_state += utils.onehot2d(cell_state, locations)
        cell_state = tf.clip_by_value(cell_state, 0, 1)
        #########################################################################
        # Extract logits from the 2D logits.
        if self.soft_attention:
            logits = tf.reduce_sum(logits2d * locations_prob2d, axis=[1, 2])
        else:
            logits = gather_2d(logits2d, locations)
        ############################################################
        endpoints = {}
        endpoints["cell_outputs"] = {
            "locations": locations,
            "locations_labels": locations_labels,
            "best_locations": best_locations,
            "best_locations_labels": best_locations_labels,
            "locations_logits2d": locations_logits2d,
            "locations_prob2d": locations_prob2d,
            "cell_state": cell_state,
            "features_space_logits": features_space_logits,
            "features_space_weights": features_space_weights,
            "features_channels_logits": features_channels_logits,
            "features_channels_weights": features_channels_weights,
            "locations_logits": locations_logits,
            "all_positions": all_positions,
        }
        if not reuse:
            self.collect_variables(list(variables_after - variables_before))

        return logits, cell_state, endpoints
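
A simplified sketch of how a SaccaderCell with the __call__ signature above might be unrolled over several glimpses. The cell_state initialization and the final averaging of per-glimpse logits are illustrative assumptions, not necessarily what the full Saccader model does; cell, mixed_features2d, and logits2d are taken to come from the surrounding model.

import tensorflow.compat.v1 as tf


def unroll_saccader_cell(cell, mixed_features2d, logits2d, num_times,
                         is_training=False):
    # Start from an empty visitation mask; the cell marks each attended location.
    cell_state = tf.zeros_like(mixed_features2d[:, :, :, :1])
    logits_t = []
    for _ in range(num_times):
        logits, cell_state, _ = cell(mixed_features2d,
                                     cell_state,
                                     logits2d,
                                     is_training=is_training,
                                     policy="learned")
        logits_t.append(logits)
    # Average the per-glimpse logits into a single prediction.
    return tf.add_n(logits_t) / float(num_times)
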