Example #1
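  # Assumed context (not shown on this page): these are methods of a Sonnet
  # capsule-layer module in a TF1 codebase; `np`, `tf`, `snt`, `AttrDict`,
  # `neural` and `_capsule` are imported by the surrounding module.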
  def make_report(self, data):

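    # Forward pass; gather the loss and any module-specific report values.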
    res = self.connect(data)
    exprs = AttrDict(loss=self._loss(data, res))
    exprs.update(self._report(data, res))

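    # Coerce non-tensor values (e.g. Python floats) to tensors so every
    # reported entry can be fetched uniformly.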
    for k, v in exprs.items():
      if not isinstance(v, tf.Tensor):
        exprs[k] = tf.convert_to_tensor(v)

    return exprs

  def _build(self, h, x, presence=None):
    """Builds the module.

    Args:
      h: Tensor of encodings of shape [B, n_enc_dims].
      x: Tensor of inputs of shape [B, n_points, n_input_dims].
      presence: Tensor of shape [B, n_points, 1] or None; if given, it
        indicates which input points are present.

    Returns:
      An AttrDict of tensors: votes with their scales and presence
      probabilities, the likelihood outputs, and auxiliary losses.
    """
    batch_size, n_input_points, _ = x.shape.as_list()
    res = AttrDict(
        dynamic_weights_l2=tf.constant(0.)
    )

    output_shapes = (
        [1],  # per-capsule presence
        [self._n_votes],  # per-vote presence
        [self._n_votes],  # per-vote scale
        [self._n_votes, self._n_caps_dims],  # per-vote pose (the votes)
    )

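    # A single MLP predicts all parameters of each capsule at once; its flat
    # output is split according to `output_shapes` below.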
    splits = [np.prod(i).astype(np.int32) for i in output_shapes]
    n_outputs = sum(splits)
    batch_mlp = neural.BatchMLP([self._n_hiddens, self._n_hiddens, n_outputs],
                                use_bias=True)

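    # Split the flat prediction per capsule and reshape each piece to
    # [batch_size, n_caps, *shape].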
    all_params = batch_mlp(h)
    all_params = tf.split(all_params, splits, -1)
    batch_shape = [batch_size, self._n_caps]
    all_params = [tf.reshape(i, batch_shape + s)
                  for (i, s) in zip(all_params, output_shapes)]

    def add_noise(tensor):
      # Perturb the logits with uniform noise in [-2, 2).
      return tensor + tf.random.uniform(tensor.shape, minval=-.5,
                                        maxval=.5) * 4.

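    # Noisy presence logits; a vote counts as present only when both its
    # capsule and the vote itself are present.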
    res.pres_logit_per_caps = add_noise(all_params[0])
    res.pres_logit_per_vote = add_noise(all_params[1])
    res.scale = tf.nn.softplus(all_params[2] + .5) + 1e-6
    res.vote_presence = (tf.nn.sigmoid(res.pres_logit_per_caps)
                         * tf.nn.sigmoid(res.pres_logit_per_vote))
    res.vote = all_params[3]

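    # Merge the capsule and vote axes: [B, n_caps, n_votes, ...] becomes
    # [B, n_caps * n_votes, ...].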
    for k, v in res.items():
      if v.shape.ndims > 0:
        res[k] = snt.MergeDims(1, 2)(v)

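    # Score the votes against the input points with an order-invariant
    # mixture likelihood over which vote explains each point.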
    likelihood = _capsule.OrderInvariantCapsuleLikelihood(self._n_votes,
                                                          res.vote, res.scale,
                                                          res.vote_presence)
    ll_res = likelihood(x, presence)
    res.update(ll_res._asdict())

    # Post-processing: regularizers and diagnostics.
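    # KL between the posterior mixing probabilities and a uniform prior over
    # input points.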
    mixing_probs = tf.nn.softmax(ll_res.mixing_logits, 1)
    prior_mixing_log_prob = tf.log(1. / n_input_points)
    mixing_kl = mixing_probs * (ll_res.mixing_log_prob - prior_mixing_log_prob)
    mixing_kl = tf.reduce_mean(tf.reduce_sum(mixing_kl, -1))

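    # One-hot of the winning capsule for every input point; after masking by
    # point presence, sum over points to count the wins of each capsule.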
    wins_per_caps = tf.one_hot(ll_res.is_from_capsule, depth=self._n_caps)

    if presence is not None:
      wins_per_caps *= tf.expand_dims(presence, -1)

    wins_per_caps = tf.reduce_sum(wins_per_caps, 1)

    has_any_wins = tf.to_float(tf.greater(wins_per_caps, 0))
    should_be_active = tf.to_float(tf.greater(wins_per_caps, 1))

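    # Push a capsule's presence logit towards "on" only if it explains more
    # than one point; capsules with no wins at all are masked out.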
    sparsity_loss = tf.nn.sigmoid_cross_entropy_with_logits(
        labels=should_be_active, logits=res.pres_logit_per_caps)

    sparsity_loss = tf.reduce_sum(sparsity_loss * has_any_wins, -1)
    sparsity_loss = tf.reduce_mean(sparsity_loss)

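    # A capsule's presence probability is the largest presence probability
    # among its votes.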
    caps_presence_prob = tf.reduce_max(
        tf.reshape(res.vote_presence,
                   [batch_size, self._n_caps, self._n_votes]), 2)

    res.update(dict(
        mixing_kl=mixing_kl,
        sparsity_loss=sparsity_loss,
        caps_presence_prob=caps_presence_prob,
        mean_scale=tf.reduce_mean(res.scale)
    ))
    return res
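A standalone toy sketch of the win-counting step above (assumes TF1, as in the
listing; the shapes and values are made up for illustration):

  import tensorflow as tf

  # For each of 5 input points, the index of the capsule that wins it.
  is_from_capsule = tf.constant([[0, 2, 2, 1, 2]])  # [B=1, n_points=5]
  wins = tf.one_hot(is_from_capsule, depth=3)       # [1, 5, 3]
  wins_per_caps = tf.reduce_sum(wins, 1)            # [1, 3]

  with tf.Session() as sess:
    print(sess.run(wins_per_caps))  # [[1. 1. 3.]]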