Example no. 1
0
    def mutual_info_gap(
        self,
        n_bins: int = 10,
        strategy: Literal['uniform', 'quantile', 'kmeans', 'gmm'] = 'uniform',
    ) -> float:
        """Mutual Information Gap (MIG) of the stored latents vs. factors.

        Parameters
        ----------
        n_bins : int, optional
            Number of bins for discretizing the latents, by default 10.
            Discretization is skipped entirely when ``n_bins <= 1``.
        strategy : {'uniform', 'quantile', 'kmeans', 'gmm'}
            Strategy used to define the widths of the bins, by default
            'uniform'.
            'uniform' - All bins in each feature have identical widths.
            'quantile' - All bins in each feature have the same number of
            points.
            'kmeans' - Values in each bin have the same nearest center of a
            1D cluster.
            'gmm' - presumably bins are derived from a 1-D Gaussian mixture
            fit; see ``discretizing`` for the exact semantics (TODO confirm).

        Returns
        -------
        float
            Mutual information gap score.
        """
        z = self.dist_to_tensor(self.latents).numpy()
        # MIG is defined on discrete variables, so bin each latent
        # dimension independently unless binning is disabled (n_bins <= 1).
        if n_bins > 1:
            z = discretizing(z,
                             independent=True,
                             n_bins=n_bins,
                             strategy=strategy)
        f = self.factors
        # Calls the module-level mutual_info_gap helper (the enclosing
        # class scope does not shadow it inside a method body).
        return mutual_info_gap(z, f)
Example no. 2
0
  def cal_mutual_info_gap(self, mean=True):
    r"""Compute the mutual information gap (MIG) score.

    Arguments:
      mean : a Boolean. If True, use the mean of the latent distribution
        when extracting the latent codes for the MIG calculation.

    Return:
      a dictionary : {'mig': score}, where the score is the average MIG
      over all (latent code, factor) pairs.

    Reference:
      Chen, R.T.Q., Li, X., Grosse, R., Duvenaud, D., 2019. Isolating Sources of
        Disentanglement in Variational Autoencoders. arXiv:1802.04942 [cs, stat].
    """
    # One MIG score per (latent code, factor) pair; report the average.
    scores = [
        metrics.mutual_info_gap(code, factor)
        for code, factor in zip(self._latent_codes(mean), self.factors)
    ]
    return dict(mig=np.mean(scores))