def cal_dcc_scores(self, method='spearman', mean=True):
  r""" Same as the D.C.I metric, but using the correlation matrix
  instead of the importance matrix.

  Return:
    tuple of 2 scalars:
      - disentanglement score of the correlation matrix
      - completeness score of the correlation matrix
  """
  train, test = self.create_correlation_matrix(mean=mean,
                                               method=method,
                                               decode=False)
  # only the magnitude of the correlation matters
  train = np.abs(train)
  test = np.abs(test)
  d = (metrics.disentanglement_score(train) +
       metrics.disentanglement_score(test)) / 2.
  c = (metrics.completeness_score(train) +
       metrics.completeness_score(test)) / 2.
  return d, c
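# A minimal sketch (not the library implementation) of what
# `metrics.disentanglement_score` and `metrics.completeness_score` are
# assumed to compute, following the D.C.I formulation of
# Eastwood & Williams (2018).  `R` is a non-negative
# (num_codes, num_factors) matrix with at least two codes and two
# factors; the orientation is an assumption here.
import numpy as np
from scipy.stats import entropy

def _disentanglement_sketch(R):
  # per-code distribution over factors; a one-hot row scores 1
  P = R / (R.sum(axis=1, keepdims=True) + 1e-12)
  D_i = 1. - entropy(P.T, base=R.shape[1])
  # weight each code by its share of the total importance
  rho = R.sum(axis=1) / R.sum()
  return float(np.sum(rho * D_i))

def _completeness_sketch(R):
  # per-factor distribution over codes
  P = R / (R.sum(axis=0, keepdims=True) + 1e-12)
  C_j = 1. - entropy(P, base=R.shape[0])
  return float(np.mean(C_j))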
def cal_dcmi_scores(self, mean=True, n_neighbors=3):
  r""" Same as the D.C.I metric, but using the mutual information
  matrix (estimated by the nearest-neighbor method) instead of the
  importance matrix.

  Return:
    tuple of 2 scalars:
      - disentanglement score of the mutual information matrix
      - completeness score of the mutual information matrix
  """
  train, test = self.create_mutualinfo_matrix(mean=mean,
                                              n_neighbors=n_neighbors)
  d = (metrics.disentanglement_score(train) +
       metrics.disentanglement_score(test)) / 2.
  c = (metrics.completeness_score(train) +
       metrics.completeness_score(test)) / 2.
  return d, c
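# A hedged sketch of how a mutual information matrix of shape
# (num_codes, num_factors) could be estimated; the actual
# `create_mutualinfo_matrix` implementation is not shown in this
# section.  sklearn's `mutual_info_regression` uses the Kraskov et al.
# nearest-neighbor estimator, matching the `n_neighbors` parameter.
import numpy as np
from sklearn.feature_selection import mutual_info_regression

def _mutualinfo_matrix_sketch(codes, factors, n_neighbors=3):
  # codes: (n_samples, num_codes); factors: (n_samples, num_factors)
  mi = np.stack([
      mutual_info_regression(codes, factors[:, j], n_neighbors=n_neighbors)
      for j in range(factors.shape[1])
  ], axis=1)  # -> (num_codes, num_factors)
  return mi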
def cal_dcd_scores(self, n_samples=1000, lognorm=True, n_components=2):
  r""" Same as the D.C.I metric, but using the divergence matrix
  instead of the importance matrix.

  Return:
    tuple of 2 scalars:
      - disentanglement score of the divergence matrix
      - completeness score of the divergence matrix
  """
  # for the divergence matrix, smaller values are better
  train, test = self.create_divergence_matrix(n_samples=n_samples,
                                              lognorm=lognorm,
                                              n_components=n_components,
                                              normalize_per_code=True,
                                              decode=False)
  # flip the values so that higher is better, as expected by the
  # disentanglement and completeness scores
  train = 1. - train
  test = 1. - test
  d = (metrics.disentanglement_score(train) +
       metrics.disentanglement_score(test)) / 2.
  c = (metrics.completeness_score(train) +
       metrics.completeness_score(test)) / 2.
  return d, c
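# Example usage (the object construction is an assumption; only the
# three methods above are from this module):
#
#   crt = ...  # an instance exposing the cal_*_scores methods
#   d_cc, c_cc = crt.cal_dcc_scores(method='spearman')
#   d_mi, c_mi = crt.cal_dcmi_scores(n_neighbors=3)
#   d_dv, c_dv = crt.cal_dcd_scores(n_samples=1000)
#   # each call returns (disentanglement, completeness) averaged over
#   # the train and test matrices; higher is better for both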