Example #1

(In these snippets, `tfm` and `tft` appear to be module aliases defined in the surrounding file, presumably something like `tfm = tf.math` and `tft = tf`; the imports themselves are not part of the excerpts.)
 def inference(self, features, outputs, is_train):
     """Inference for targeting ybar"""
     if not is_train:
         return super().inference(features, outputs, is_train)
     sens_attr = tf.cast(tf.squeeze(features['sensitive'], -1),
                         dtype=tf.int32)
     out_int = tf.cast(tf.squeeze(outputs, -1), dtype=tf.int32)
     # likelihood for y=1
     lik1 = tf.squeeze(tf.nn.sigmoid(self._logits(features)), axis=-1)
     # likelihood for y=0
     lik0 = 1 - lik1
     lik = tf.stack((lik0, lik1), axis=-1)
     debias = self._debiasing_parameters()
     # `debias` is indexed as (y, s, y'). We stack label and sensitive
     # attribute into (batch_size, 2) index pairs; each pair [y_i, s_i]
     # selects a slice over the y' axis, so `debias_per_example` has
     # shape (batch_size, 2)
     debias_per_example = tft.gather_nd(
         debias, tf.stack((out_int, sens_attr), axis=-1))
     weighted_lik = debias_per_example * lik
     log_cond_prob = tfm.log(tf.reduce_sum(weighted_lik, axis=-1))
     regr_loss = -tf.reduce_mean(log_cond_prob)
     l2_loss = self._l2_loss()
     return ({
         'loss': regr_loss + l2_loss,
         'regr_loss': regr_loss,
         'l2_loss': l2_loss
     }, self._trainable_variables())
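
The `gather_nd` lookup above is the heart of the debiasing step and is easy to sanity-check in isolation. Below is a minimal, self-contained sketch (not part of the original code) with a made-up 2×2×2 weight tensor; the names `debias`, `y` and `s` are illustrative only:

 import numpy as np
 import tensorflow as tf

 # hypothetical debiasing weights, indexed as debias[y, s, y']
 debias = tf.constant(np.arange(8, dtype=np.float32).reshape(2, 2, 2))

 y = tf.constant([0, 1, 1], dtype=tf.int32)  # observed labels
 s = tf.constant([1, 0, 1], dtype=tf.int32)  # sensitive attribute

 # each row [y_i, s_i] indexes the first two axes of `debias`,
 # leaving the trailing y' axis intact
 indices = tf.stack((y, s), axis=-1)          # shape (3, 2)
 per_example = tf.gather_nd(debias, indices)  # shape (3, 2)
 print(per_example.numpy())  # row i equals debias[y_i, s_i, :]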
Example #2
 def update(self, features, labels, pred_mean):
     # fraction predicted negative (pred_mean < 0.5) among examples
     # with ybar == 0 and s == 1
     test_for_ybar0_s1 = tfm.logical_and(tfm.equal(features['ybar'], 0),
                                         tfm.equal(features['sensitive'], 1))
     accepted = tft.gather_nd(tf.cast(pred_mean < 0.5, tf.float32),
                              tf.where(test_for_ybar0_s1))
     return self._return_and_store(self.mean(accepted))
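
The `tf.where` + `gather_nd` masking idiom used here recurs in Examples #4 to #6 below: `tf.where` on a boolean vector returns the indices of the `True` entries, and `gather_nd` picks out exactly those entries. A toy run with invented values:

 import tensorflow as tf

 labels = tf.constant([1., 0., 1., 1.])
 ybar = tf.constant([0, 0, 1, 0])
 sensitive = tf.constant([1, 1, 0, 0])

 mask = tf.math.logical_and(tf.equal(ybar, 0), tf.equal(sensitive, 1))
 print(tf.where(mask).numpy())  # [[0], [1]] -- indices of the True entries
 selected = tf.gather_nd(labels, tf.where(mask))  # [1., 0.]
 print(tf.reduce_mean(selected).numpy())          # 0.5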
Example #3
    def _build_ell(self, weights, means, chol_covars, inducing_inputs, kernel_chol, features,
                   outputs, is_train):
        """Construct the Expected Log Likelihood

        Args:
            weights: (num_components,)
            means: shape: (num_components, num_latents, num_inducing)
            chol_covars: shape: (num_components, num_latents, num_inducing[, num_inducing])
            inducing_inputs: (num_latents, num_inducing, input_dim)
            kernel_chol: (num_latents, num_inducing, num_inducing)
            features: dict of tensors; features['input'] has shape
                (batch_size, input_dim) and features['sensitive'] has
                shape (batch_size, 1)
            outputs: (batch_size, num_latents)
            is_train: True if we're training, False otherwise
        Returns:
            Expected log likelihood as scalar
        """
        if self.args['s_as_input']:
            inputs = tf.concat((features['input'], features['sensitive']), axis=1)
        else:
            inputs = features['input']

        kern_prods, kern_sums = self._build_interim_vals(kernel_chol, inducing_inputs, inputs)
        # shape of `latent_samples`: (num_components, num_samples, batch_size, num_latents)
        latent_samples = self._build_samples(kern_prods, kern_sums, means, chol_covars)
        if is_train:
            sens_attr = tf.cast(tf.squeeze(features['sensitive'], -1), dtype=tf.int32)
            out_int = tf.cast(tf.squeeze(outputs, -1), dtype=tf.int32)
            log_lik0 = self.lik.log_cond_prob(tf.zeros_like(outputs), latent_samples)
            log_lik1 = self.lik.log_cond_prob(tf.ones_like(outputs), latent_samples)
            log_lik = tf.stack((log_lik0, log_lik1), axis=-1)
            debias = self._debiasing_parameters()
            # `debias` is indexed as (y, s, y'). We stack label and sensitive
            # attribute into (batch_size, 2) index pairs; each pair [y_i, s_i]
            # selects a slice over the y' axis, so `debias_per_example` has
            # shape (batch_size, 2)
            debias_per_example = tft.gather_nd(debias, tf.stack((out_int, sens_attr), axis=-1))
            weighted_lik = debias_per_example * tf.exp(log_lik)
            # note: this leaves log space; see the log-sum-exp variant
            # sketched after this example
            log_cond_prob = tfm.log(tf.reduce_sum(weighted_lik, axis=-1))
        else:
            log_cond_prob = self.lik.log_cond_prob(outputs, latent_samples)
        ell_by_component = tf.reduce_sum(log_cond_prob, axis=[1, 2])

        # weighted sum of the components
        ell = util.mul_sum(weights, ell_by_component)
        return ell / self.args['num_samples']
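
One caveat worth flagging: `tfm.log(tf.reduce_sum(... * tf.exp(log_lik), ...))` leaves log space, and `tf.exp` underflows to zero once the log-likelihoods drop below roughly -87 in float32, turning the result into `-inf`. A numerically stabler equivalent can be sketched with `tf.reduce_logsumexp`, assuming every debiasing weight is strictly positive (the helper name `stable_weighted_log_lik` is invented):

 import tensorflow as tf

 def stable_weighted_log_lik(debias_per_example, log_lik):
     """log(sum_k w_k * exp(l_k)) computed as logsumexp_k(l_k + log(w_k)).

     Sketch only: assumes all entries of `debias_per_example` are > 0 so
     that their log is finite; broadcasting against the trailing axes of
     `log_lik` works exactly as in the product above.
     """
     return tf.reduce_logsumexp(log_lik + tf.math.log(debias_per_example),
                                axis=-1)

 # values around -800 underflow to exp() == 0 in the naive form (yielding
 # -inf), but stay finite here:
 print(stable_weighted_log_lik(tf.constant([[0.5, 2.0]]),
                               tf.constant([[-800.0, -805.0]])).numpy())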
Example #4
 def update(self, features, labels, pred_mean):
     # mean of the true labels among examples with ybar == 1 and s == 0
     test_for_ybar1_s0 = tfm.logical_and(tfm.equal(features['ybar'], 1),
                                         tfm.equal(features['sensitive'], 0))
     accepted = tft.gather_nd(labels, tf.where(test_for_ybar1_s0))
     return self._return_and_store(self.mean(accepted))
Example #5
 def update(self, features, labels, pred_mean):
     # mean label (positive base rate) among examples with s == 1
     accepted = tft.gather_nd(labels,
                              tf.where(tfm.equal(features['sensitive'], 1)))
     return self._return_and_store(self.mean(accepted))
Example #6
 def update(self, features, labels, pred_mean):
     # acceptance rate (fraction with pred_mean > 0.5) among examples
     # with s == 0
     accepted = tft.gather_nd(tf.cast(pred_mean > 0.5, tf.float32),
                              tf.where(tfm.equal(features['sensitive'], 0)))
     return self._return_and_store(self.mean(accepted))
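
Each of these group-conditional means can also be written with `tf.boolean_mask`, which is equivalent to the `tf.where` + `gather_nd` pattern but reads more directly. A sketch (the helper `group_rate` and the sample values are invented) that also shows how two such rates combine into a demographic-parity gap:

 import tensorflow as tf

 def group_rate(values, mask):
     # mean of `values` over the entries where `mask` is True; equivalent
     # to gather_nd(values, tf.where(mask)) followed by the mean
     return tf.reduce_mean(tf.boolean_mask(values, mask))

 pred_mean = tf.constant([0.2, 0.7, 0.9, 0.6])
 sensitive = tf.constant([0, 0, 1, 1])
 accept = tf.cast(pred_mean > 0.5, tf.float32)

 # acceptance rate per group (Example #6 computes the s == 0 case) and
 # the difference between them, i.e. a demographic-parity gap
 rate_s0 = group_rate(accept, tf.equal(sensitive, 0))
 rate_s1 = group_rate(accept, tf.equal(sensitive, 1))
 print(float(rate_s0), float(rate_s1), float(rate_s0 - rate_s1))  # 0.5 1.0 -0.5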