def test_softmax2d(self):
  batch_size = 10
  axis_dims = [50, 40, 30]
  x = tf.random.uniform([batch_size,] + axis_dims, minval=-1, maxval=1)
  # Sum of probabilities should be equal to 1.
  sum_prob = tf.reduce_sum(utils.softmax2d(x), axis=[1, 2])
  diff = tf.reduce_mean(
      tf.math.abs(sum_prob - tf.ones_like(sum_prob, dtype=tf.float32)))
  self.assertAlmostEqual(self.evaluate(diff), 0., 5)
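
# The test above checks that utils.softmax2d produces values summing to 1 over
# the two spatial axes. For reference, a minimal sketch of that behavior could
# look like the following; this is an illustration under that assumption, not
# the actual utils.softmax2d implementation.
def _softmax2d_sketch(x):
  """Applies a joint softmax over axes 1 and 2 of a 4-D tensor."""
  shape = tf.shape(x)
  # Flatten the two spatial axes, softmax over them jointly per channel,
  # then restore the original shape.
  flat = tf.reshape(x, [shape[0], shape[1] * shape[2], shape[3]])
  flat = tf.nn.softmax(flat, axis=1)
  return tf.reshape(flat, shape)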
def __call__(self, mixed_features2d, cell_state, logits2d,
             is_training=False, policy="learned"):
  """Builds Saccader cell.

  Args:
    mixed_features2d: 4-D Tensor of shape [batch, height, width, channels].
    cell_state: 4-D Tensor of shape [batch, height, width, 1] with the cell
      state.
    logits2d: 4-D Tensor of shape [batch, height, width, channels].
    is_training: (Boolean) Whether the cell is in training or inference mode.
    policy: (String) 'learned': use the learned policy, 'random': use a
      random policy, or 'center': always look at the center.

  Returns:
    logits: Model logits.
    cell_state: New cell state.
    endpoints: Dictionary with cell parameters.
  """
  batch_size, height, width, channels = mixed_features2d.shape.as_list()
  reuse = True if self.var_list else False
  position_channels = utils.position_channels(mixed_features2d)

  variables_before = set(tf.global_variables())
  with tf.variable_scope("saccader_cell", reuse=reuse):
    # Compute 2D weights of features across space.
    features_space_logits = tf.layers.dense(
        mixed_features2d, units=1, use_bias=False,
        name="attention_weights") / tf.math.sqrt(float(channels))
    features_space_logits += cell_state * -1.e5  # Mask used locations.
    features_space_weights = utils.softmax2d(features_space_logits)

    # Compute 1D weights of features across channels.
    features_channels_logits = tf.reduce_sum(
        mixed_features2d * features_space_weights, axis=[1, 2])
    features_channels_weights = tf.nn.softmax(
        features_channels_logits, axis=1)

    # Compute location probability.
    locations_logits2d = tf.reduce_sum(
        mixed_features2d *
        features_channels_weights[:, tf.newaxis, tf.newaxis, :],
        axis=-1, keepdims=True)
    locations_logits2d += cell_state * -1.e5  # Mask used locations.
    locations_prob2d = utils.softmax2d(locations_logits2d)
  variables_after = set(tf.global_variables())

  # Compute best locations.
  locations_logits = tf.reshape(locations_logits2d, (batch_size, -1))
  all_positions = tf.reshape(
      position_channels, [batch_size, height * width, 2])
  best_locations_labels = tf.argmax(locations_logits, axis=-1)
  best_locations = utils.batch_gather_nd(
      all_positions, best_locations_labels, axis=1)

  # Sample locations.
  if policy == "learned":
    if is_training:
      # During training, sample the location from the learned distribution.
      dist = tfp.distributions.Categorical(logits=locations_logits)
      locations_labels = dist.sample()
      locations = utils.batch_gather_nd(
          all_positions, locations_labels, axis=1)
      # Ensure locations stay in the range [-1., 1.].
      locations = tf.clip_by_value(locations, -1., 1.)
      tf.logging.info("Sampling locations.")
      tf.logging.info(
          "==================================================")
    else:
      # At inference, use the most likely (argmax) location.
      locations = best_locations
      locations_labels = best_locations_labels
  elif policy == "random":
    # Random policy: sample locations uniformly over the image.
    locations = tf.random_uniform(shape=(batch_size, 2),
                                  minval=-1., maxval=1.)
    locations_labels = None
  elif policy == "center":
    # Center policy: always look at the image center.
    locations = tf.zeros(shape=(batch_size, 2))
    locations_labels = None

  # Update cell_state to mark the attended location as used.
  cell_state += utils.onehot2d(cell_state, locations)
  cell_state = tf.clip_by_value(cell_state, 0., 1.)
  #########################################################################
  # Extract logits from the 2D logits.
  if self.soft_attention:
    logits = tf.reduce_sum(logits2d * locations_prob2d, axis=[1, 2])
  else:
    logits = gather_2d(logits2d, locations)
  #########################################################################
  endpoints = {}
  endpoints["cell_outputs"] = {
      "locations": locations,
      "locations_labels": locations_labels,
      "best_locations": best_locations,
      "best_locations_labels": best_locations_labels,
      "locations_logits2d": locations_logits2d,
      "locations_prob2d": locations_prob2d,
      "cell_state": cell_state,
      "features_space_logits": features_space_logits,
      "features_space_weights": features_space_weights,
      "features_channels_logits": features_channels_logits,
      "features_channels_weights": features_channels_weights,
      "locations_logits": locations_logits,
      "all_positions": all_positions,
  }
  if not reuse:
    self.collect_variables(list(variables_after - variables_before))

  return logits, cell_state, endpoints
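
# Illustrative usage sketch (an assumption, not code from this repository):
# the cell is typically unrolled for a fixed number of glimpses, with the
# returned cell_state fed back in so that previously attended locations stay
# masked. `cell`, `num_times`, and the argument tensors below are hypothetical
# names introduced only for this example.
def _unroll_cell_sketch(cell, mixed_features2d, logits2d, num_times,
                        is_training=False, policy="learned"):
  batch_size, height, width, _ = mixed_features2d.shape.as_list()
  # All-zeros state: no location has been attended yet.
  cell_state = tf.zeros([batch_size, height, width, 1], dtype=tf.float32)
  logits_per_time = []
  for _ in range(num_times):
    logits, cell_state, _ = cell(
        mixed_features2d, cell_state, logits2d,
        is_training=is_training, policy=policy)
    logits_per_time.append(logits)
  return logits_per_time, cell_state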