def mutual_info_p(a, b):
    """
    Fast mutual information calculation based upon sklearn,
    but with estimation of uncertainty from Lin & Tegmark 2016
    """
    # contingency matrix of a * b
    contingency = contingency_matrix(a, b, sparse=True)
    nzx, nzy, Nall = sp.find(contingency)
    N = len(a)

    # entropy of a
    Na = np.ravel(contingency.sum(axis=0))
    S_a, var_a = entropyp(Na / np.sum(Na))

    # entropy of b
    Nb = np.ravel(contingency.sum(axis=1))
    S_b, var_b = entropyp(Nb / np.sum(Nb))

    # joint entropy
    S_ab, var_ab = entropyp(Nall / N)

    # mutual information
    MI = S_a + S_b - S_ab

    # uncertainty and variance of MI
    MI_var = var_a + var_b + var_ab
    uncertainty = np.sqrt(MI_var / len(a))

    return MI, uncertainty
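# The entropyp helper used above is not shown in these snippets. A minimal
# sketch of what it is assumed to do, mirroring joint_entropy further down:
# the plug-in Shannon entropy in bits plus the spread of the per-cell
# surprisal, which mutual_info_p turns into a Lin & Tegmark style
# uncertainty estimate.
import numpy as np

def entropyp(p):
    p = np.asarray(p, dtype=float)
    p = p[p > 0]                 # ignore empty cells
    surprisal = -np.log2(p)      # -log2 P(x) per outcome
    S = np.sum(p * surprisal)    # plug-in entropy H = -sum p log2 p
    var = np.var(surprisal)      # spread of the surprisal across cells
    return S, var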
def g_score(y_true, y_pred, eps=None, sparse=False):
    """
    G-score

    Calculates the G-score: the geometric mean of the per-class true rates,
    ``prod(true_rates) ** (1 / n_classes)`` (i.e. ``sqrt(prod(true_rates))``
    in the two-class case).

    Parameters
    ----------
    y_true: array-like, shape = [n_samples]
        Ground truth (correct) target values.
    y_pred: array-like, shape = [n_samples]
        Estimated targets as returned by a classifier.
    eps: None or float, optional.
        If a float, that value is added to all values in the contingency
        matrix. This helps to stop NaN propagation.
        If ``None``, nothing is adjusted.
    sparse: boolean, optional.
        If True, build a sparse CSR contingency matrix. If ``eps is not None``
        and ``sparse is True``, will throw ValueError.

    Returns
    -------
    g: float
        G-score.
    """
    y_true, y_pred = check_clusterings(y_true, y_pred)
    c = contingency_matrix(y_true, y_pred, eps=eps, sparse=sparse)
    d = c.diagonal()
    # per-class true rate: diagonal count divided by the class (row) total;
    # keep this 1-D so the product runs over classes, not a broadcast matrix
    true_rates = np.asarray(d).ravel() / np.asarray(c.sum(axis=1)).ravel()
    g = np.prod(true_rates) ** (1.0 / c.shape[0])
    return g
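# A small worked example of the intended behaviour (comment-only, assuming
# g_score and its sklearn helpers are importable in this module):
#
#   y_true = [0, 0, 1, 1]
#   y_pred = [0, 1, 1, 1]
#   contingency matrix = [[1, 1],
#                         [0, 2]]
#   per-class true rates = diag / row sums = [1/2, 2/2] = [0.5, 1.0]
#   g_score = (0.5 * 1.0) ** (1/2) ≈ 0.707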
def rand_index_score(labels_true, labels_pred):
    # check clusterings
    labels_true, labels_pred = check_clusterings(labels_true, labels_pred)

    # initial statistics calculations
    n_samples = labels_true.shape[0]
    n_samples_comb = comb(n_samples, 2)
    n_classes = np.unique(labels_true).shape[0]
    n_clusters = np.unique(labels_pred).shape[0]
    class_freq = np.bincount(labels_true)
    cluster_freq = np.bincount(labels_pred)

    # Special limit cases: no clustering since the data is not split;
    # or trivial clustering where each document is assigned a unique cluster.
    # These are perfect matches hence return 1.0.
    if (n_classes == n_clusters == 1 or
            n_classes == n_clusters == 0 or
            n_classes == n_clusters == n_samples):
        return 1.0

    # Compute the RI using the contingency data
    contingency = contingency_matrix(labels_true, labels_pred, sparse=True)
    sum_comb_c = sum((n_c ** 2) for n_c in cluster_freq)
    sum_comb_k = sum((n_k ** 2) for n_k in class_freq)
    sum_comb = sum((n_ij ** 2) for n_ij in contingency.data)

    return (1 + (sum_comb - 0.5 * sum_comb_k - 0.5 * sum_comb_c) /
            n_samples_comb)
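# Comment-only sanity check of the identity used above:
# with labels_true = [0, 0, 1, 1] and labels_pred = [0, 0, 1, 2],
#   contingency = [[2, 0, 0], [0, 1, 1]]
#   sum n_ij^2 = 6,  sum class_freq^2 = 8,  sum cluster_freq^2 = 6
#   RI = 1 + (6 - 0.5*8 - 0.5*6) / C(4, 2) = 1 - 1/6 = 5/6 ≈ 0.833
# which matches the pair-counting definition: of the 6 sample pairs, only the
# pair (2, 3) is grouped together in the truth but split in the prediction.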
def est_mutual_info_p(a, b):
    # contingency matrix of a * b
    contingency = contingency_matrix(a, b, sparse=True)
    nzx, nzy, Nall = sp.find(contingency)

    # entropy of a
    Na = np.ravel(contingency.sum(axis=0))  # counts per value of a
    S_a, var_a = entropyp(Na)               # entropy estimated from counts

    # entropy of b
    Nb = np.ravel(contingency.sum(axis=1))
    S_b, var_b = entropyp(Nb)

    # joint entropy
    S_ab, var_ab = entropyp(Nall)

    # mutual information
    MI = S_a + S_b - S_ab

    # uncertainty and variance of MI
    MI_var = var_a + var_b + var_ab
    uncertainty = np.sqrt(MI_var / len(a))

    return MI, uncertainty
def mutual_info_score(labels_true, labels_pred, contingency=None):
    if contingency is None:
        labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
        contingency = contingency_matrix(labels_true, labels_pred, sparse=True)
    else:
        contingency = check_array(contingency,
                                  accept_sparse=['csr', 'csc', 'coo'],
                                  dtype=[int, np.int32, np.int64])

    if isinstance(contingency, np.ndarray):
        nzx, nzy = np.nonzero(contingency)
        nz_val = contingency[nzx, nzy]
    elif sp.issparse(contingency):
        nzx, nzy, nz_val = sp.find(contingency)
    else:
        raise ValueError("Unsupported type for 'contingency': %s" %
                         type(contingency))

    contingency_sum = contingency.sum()
    pi = np.ravel(contingency.sum(axis=1))
    pj = np.ravel(contingency.sum(axis=0))
    log_contingency_nm = np.log2(nz_val)
    contingency_nm = nz_val / contingency_sum
    outer = pi.take(nzx) * pj.take(nzy)
    log_outer = -np.log2(outer) + log2(pi.sum()) + log2(pj.sum())
    mi = (contingency_nm * (log_contingency_nm - log2(contingency_sum)) +
          contingency_nm * log_outer)
    return mi.sum()
def joint_entropy(X, Y):
    N = float(len(X))
    contingency = contingency_matrix(X, Y, sparse=True)
    nzx, nzy, Nall = sp.find(contingency)
    pAll = Nall / N
    S = -np.sum(pAll * np.log2(pAll))
    var = np.var(-np.log2(pAll))
    return S, var
def clustering_accuracy(labels_true, labels_pred):
    """Compute clustering accuracy."""
    # note: newer scikit-learn releases moved these helpers to the private
    # sklearn.metrics.cluster._supervised module
    from sklearn.metrics.cluster import supervised
    from scipy.optimize import linear_sum_assignment

    labels_true, labels_pred = supervised.check_clusterings(labels_true,
                                                            labels_pred)
    value = supervised.contingency_matrix(labels_true, labels_pred)
    [r, c] = linear_sum_assignment(-value)
    return value[r, c].sum() / len(labels_true)
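# Comment-only example of what the Hungarian matching above buys you:
# labels_true = [0, 0, 1, 1] and labels_pred = [1, 1, 0, 0] is a perfect
# clustering up to relabelling, so the negated contingency matrix
# [[0, 2], [2, 0]] is matched as 0 -> 1, 1 -> 0 and the accuracy is 4/4 = 1.0,
# whereas naive label-wise accuracy would report 0.0.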
def est_joint_entropy(labels_true, labels_pred):
    N = float(len(labels_true))
    contingency = contingency_matrix(labels_true, labels_pred, sparse=True)
    nzx, nzy, Nall = sp.find(contingency)
    pAll = np.array([Ni * scipy.special.psi(Ni) for Ni in Nall if Ni > 0])
    S_hat = np.log2(N) - 1 / N * np.sum(pAll)
    return S_hat, np.var(1. / N * pAll)
def est_joint_entropy(X, Y):
    N = float(len(X))
    contingency = contingency_matrix(X, Y, sparse=True)
    nzx, nzy, Nall = sp.find(contingency)
    pAll = np.array([Ni * scipy.special.psi(Ni) for Ni in Nall if Ni > 0])
    S_hat = np.log2(N) - 1 / N * np.sum(pAll)
    var = np.var(scipy.special.psi(np.array(Nall, dtype="float32")))
    return S_hat, var
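# Comment-only note on the two est_joint_entropy variants above: both use the
# Grassberger-style bias-corrected estimator
#   S_hat = log2(N) - (1/N) * sum_i N_i * psi(N_i),
# where scipy.special.psi is the digamma function (psi(n) ≈ ln(n) - 1/(2n),
# so small-count cells are down-weighted relative to the naive plug-in
# estimate); the two variants differ only in how the variance of the per-cell
# terms is computed. Because psi is natural-log based, a 1/ln(2) rescaling of
# the psi term would arguably be needed for the result to be strictly in bits.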
def get_fpr(y_true, y_pred):
    n_samples = np.shape(y_true)[0]
    c = contingency_matrix(y_true, y_pred, sparse=True)
    tk = np.dot(c.data, np.transpose(c.data)) - n_samples            # TP
    pk = np.sum(np.asarray(c.sum(axis=0)).ravel() ** 2) - n_samples  # TP + FP
    qk = np.sum(np.asarray(c.sum(axis=1)).ravel() ** 2) - n_samples  # TP + FN
    precision = 1. * tk / pk if tk != 0. else 0.
    recall = 1. * tk / qk if tk != 0. else 0.
    f = (2 * precision * recall / (precision + recall)
         if (precision + recall) != 0. else 0.)
    return f, precision, recall
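# Comment-only note: tk, pk and qk above are pair counts, so the returned
# precision and recall are defined over pairs of samples (as in the
# Fowlkes-Mallows family), not over individual samples. For example, with
# labels_true = [0, 0, 1, 1] and labels_pred = [0, 0, 0, 1]:
#   contingency = [[2, 0], [1, 1]], tk = 4 + 1 + 1 - 4 = 2
#   pk = (3^2 + 1^2) - 4 = 6, qk = (2^2 + 2^2) - 4 = 4
#   precision = 2/6 = 1/3, recall = 2/4 = 1/2, f = 0.4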
def normalized_mutual_info_score(labels_true, labels_pred):
    labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
    classes = np.unique(labels_true)
    clusters = np.unique(labels_pred)
    if (classes.shape[0] == clusters.shape[0] == 1 or
            classes.shape[0] == clusters.shape[0] == 0):
        return 1.0

    contingency = contingency_matrix(labels_true, labels_pred, sparse=True)
    contingency = contingency.astype(np.float64)
    mi = mutual_info_score(labels_true, labels_pred, contingency=contingency)
    h_true, h_pred = entropy(labels_true), entropy(labels_pred)
    nmi = mi / max(np.sqrt(h_true * h_pred), 1e-10)
    return nmi
def adjusted_mutual_info_score(labels_true, labels_pred):
    labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
    n_samples = labels_true.shape[0]
    classes = np.unique(labels_true)
    clusters = np.unique(labels_pred)
    if (classes.shape[0] == clusters.shape[0] == 1 or
            classes.shape[0] == clusters.shape[0] == 0):
        return 1.0

    contingency = contingency_matrix(labels_true, labels_pred, sparse=True)
    contingency = contingency.astype(np.float64)
    # Calculate the MI for the two clusterings
    mi = mutual_info_score(labels_true, labels_pred, contingency=contingency)
    # Calculate the expected value for the mutual information
    emi = expected_mutual_information(contingency, n_samples)
    # Calculate entropy for each labeling
    h_true, h_pred = entropy(labels_true), entropy(labels_pred)
    ami = (mi - emi) / (max(h_true, h_pred) - emi)
    return ami
def mutual_info_p(a, b, average_method="arithmetic", normalize=False):
    """
    Fast mutual information calculation based upon sklearn,
    but with estimation of uncertainty from Lin & Tegmark 2016
    """
    # contingency matrix of a * b
    contingency = contingency_matrix(a, b, sparse=True)
    nzx, nzy, Nall = sp.find(contingency)
    N = len(a)

    # entropy of a
    Na = np.ravel(contingency.sum(axis=0))
    S_a, var_a = entropyp(Na / np.sum(Na))

    # entropy of b
    Nb = np.ravel(contingency.sum(axis=1))
    S_b, var_b = entropyp(Nb / np.sum(Nb))

    # joint entropy
    S_ab, var_ab = entropyp(Nall / N)

    # mutual information
    MI = S_a + S_b - S_ab

    # uncertainty and variance of MI
    MI_var = var_a + var_b + var_ab

    # normalization
    if normalize:
        # expected mutual information
        emi = expected_mutual_information(contingency, N)
        # normalization
        normalizer = _generalized_average(S_a, S_b, average_method)
        denominator = normalizer - emi
        if denominator < 0:
            denominator = min(denominator, -np.finfo("float64").eps)
        else:
            denominator = max(denominator, np.finfo("float64").eps)
        MI = (MI - emi) / denominator
        # this breaks MI_var - we would need to account for EMI
        MI_var = MI_var / denominator

    uncertainty = np.sqrt(MI_var / len(a))

    return MI, uncertainty
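# _generalized_average is not defined in these snippets; it is assumed to
# behave like scikit-learn's private helper of the same name, which combines
# the two entropies according to average_method. A minimal sketch:
def _generalized_average(U, V, average_method):
    if average_method == "min":
        return min(U, V)
    elif average_method == "geometric":
        return np.sqrt(U * V)
    elif average_method == "arithmetic":
        return np.mean([U, V])
    elif average_method == "max":
        return max(U, V)
    else:
        raise ValueError("'average_method' must be 'min', 'geometric', "
                         "'arithmetic', or 'max'")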
def adjusted_mutual_information(labels_true, labels_pred, n_jobs=-1,
                                emi_method="parallel", use_cython=True,
                                average_method='arithmetic'):
    """Adjusted Mutual Information.

    Adjusted Mutual Information (AMI) is an adjustment of the Mutual
    Information (MI) score to account for chance. It accounts for the fact
    that the MI is generally higher for two clusterings with a larger number
    of clusters, regardless of whether there is actually more information
    shared. For two clusterings :math:`U` and :math:`V`, the AMI is given
    as::

        AMI(U, V) = [MI(U, V) - E(MI(U, V))] / [avg(H(U), H(V)) - E(MI(U, V))]
    """
    n_samples = labels_true.shape[0]
    classes = np.unique(labels_true)
    clusters = np.unique(labels_pred)
    # Special limit cases: no clustering since the data is not split.
    # This is a perfect match hence return 1.0.
    if (classes.shape[0] == clusters.shape[0] == 1 or
            classes.shape[0] == clusters.shape[0] == 0):
        return 1.0

    contingency = contingency_matrix(labels_true, labels_pred, sparse=True)
    contingency = contingency.astype(np.float64,
                                     **_astype_copy_false(contingency))
    # Calculate the MI for the two clusterings
    mi = mutual_info_score(labels_true, labels_pred, contingency=contingency)
    # Calculate the expected value for the mutual information
    if emi_method == "parallel":
        emi = emi_parallel(contingency, n_samples, use_cython=use_cython,
                           n_jobs=n_jobs)
    else:
        emi = expected_mutual_information(contingency, n_samples)
    # Calculate entropy for each labeling
    h_true, h_pred = entropy(labels_true), entropy(labels_pred)
    normalizer = _generalized_average(h_true, h_pred, average_method)
    denominator = normalizer - emi
    # Avoid 0.0 / 0.0 when expectation equals maximum, i.e. a perfect match.
    # normalizer should always be >= emi, but because of floating-point
    # representation, sometimes emi is slightly larger. Correct this
    # by preserving the sign.
    if denominator < 0:
        denominator = min(denominator, -np.finfo('float64').eps)
    else:
        denominator = max(denominator, np.finfo('float64').eps)
    ami = (mi - emi) / denominator
    return ami, emi
def clustering_accuracy(labels_true, labels_pred):
    r"""Clustering Accuracy between two clusterings.

    Clustering Accuracy is a measure of the similarity between two labels of
    the same data. Assume that both labels_true and labels_pred contain n
    distinct labels. Clustering Accuracy is the maximum accuracy over all
    possible permutations of the labels, i.e.

        \max_{\sigma} \sum_i labels_true[i] == \sigma(labels_pred[i])

    where \sigma is a mapping from the set of unique labels of labels_pred
    to the set of unique labels of labels_true. Clustering accuracy is one
    if and only if there is a permutation of the labels such that there is
    an exact match.

    This metric is independent of the absolute values of the labels:
    a permutation of the class or cluster label values won't change the
    score value in any way.

    This metric is furthermore symmetric: switching ``labels_true`` with
    ``labels_pred`` will return the same score value. This can be useful to
    measure the agreement of two independent label assignment strategies
    on the same dataset when the real ground truth is not known.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        A clustering of the data into disjoint subsets.

    labels_pred : array, shape = [n_samples]
        A clustering of the data into disjoint subsets.

    Returns
    -------
    accuracy : float
       Clustering accuracy in the range [0, 1].
    """
    labels_true, labels_pred = supervised.check_clusterings(labels_true,
                                                            labels_pred)
    value = supervised.contingency_matrix(labels_true, labels_pred)
    [r, c] = linear_sum_assignment(-value)
    return value[r, c].sum() / len(labels_true)
def folkes_mallow_score(labels_true, labels_pred):
    """
    Fowlkes-Mallows score, computed according to:

    Ceccarelli, M. & Maratea, A.
    "A Fuzzy Extension of Some Classical Concordance Measures and an
    Efficient Algorithm for Their Computation"
    Knowledge-Based Intelligent Information and Engineering Systems,
    Springer Berlin Heidelberg, 2008, 5179, 755-763

    :param labels_true: ground-truth class labels, shape = [n_samples]
    :param labels_pred: cluster labels to evaluate, shape = [n_samples]
    :return: Fowlkes-Mallows score
    """
    labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
    n_samples = labels_true.shape[0]
    contingency = contingency_matrix(labels_true, labels_pred)
    cc = np.sum(contingency * contingency)
    N11 = (cc - n_samples)
    c1 = contingency.sum(axis=1)
    N01 = np.sum(c1 * c1) - cc
    c2 = contingency.sum(axis=0)
    N10 = np.sum(c2 * c2) - cc
    return (N11 * 1.0) / np.sqrt((N11 + N01) * (N11 + N10))
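# Comment-only sanity check: scikit-learn ships this metric as
# sklearn.metrics.fowlkes_mallows_score, so the hand-rolled version above can
# be validated against it, e.g.
#
#   from sklearn.metrics import fowlkes_mallows_score
#   fowlkes_mallows_score([0, 0, 1, 1], [0, 0, 1, 2])   # ~0.707
#   folkes_mallow_score([0, 0, 1, 1], [0, 0, 1, 2])     # should agree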
def mutual_info_score(labels_true, labels_pred, contingency=None):
    r"""Mutual Information between two clusterings.

    The Mutual Information is a measure of the similarity between two labels
    of the same data. Where :math:`|U_i|` is the number of the samples in
    cluster :math:`U_i` and :math:`|V_j|` is the number of the samples in
    cluster :math:`V_j`, the Mutual Information between clusterings
    :math:`U` and :math:`V` is given as:

    .. math::

        MI(U, V) = \sum_{i=1}^{|U|} \sum_{j=1}^{|V|}
        \frac{|U_i \cap V_j|}{N}
        \log_2 \frac{N |U_i \cap V_j|}{|U_i| |V_j|}

    This metric is independent of the absolute values of the labels:
    a permutation of the class or cluster label values won't change the
    score value in any way.

    This metric is furthermore symmetric: switching ``labels_true`` with
    ``labels_pred`` will return the same score value. This can be useful to
    measure the agreement of two independent label assignment strategies
    on the same dataset when the real ground truth is not known.

    Read more in the :ref:`User Guide <mutual_info_score>`.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        A clustering of the data into disjoint subsets.

    labels_pred : array, shape = [n_samples]
        A clustering of the data into disjoint subsets.

    contingency : {None, array, sparse matrix},
                  shape = [n_classes_true, n_classes_pred]
        A contingency matrix given by the :func:`contingency_matrix`
        function. If value is ``None``, it will be computed, otherwise the
        given value is used, with ``labels_true`` and ``labels_pred``
        ignored.

    Returns
    -------
    mi : float
       Mutual information, a non-negative value

    See also
    --------
    adjusted_mutual_info_score: Adjusted against chance Mutual Information
    normalized_mutual_info_score: Normalized Mutual Information
    """
    if contingency is None:
        labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
        contingency = contingency_matrix(labels_true, labels_pred, sparse=True)
    else:
        contingency = check_array(contingency,
                                  accept_sparse=['csr', 'csc', 'coo'],
                                  dtype=[int, np.int32, np.int64])

    if isinstance(contingency, np.ndarray):
        # For an array
        nzx, nzy = np.nonzero(contingency)
        nz_val = contingency[nzx, nzy]
    elif sp.issparse(contingency):
        # For a sparse matrix
        nzx, nzy, nz_val = sp.find(contingency)
    else:
        raise ValueError("Unsupported type for 'contingency': %s" %
                         type(contingency))

    contingency_sum = contingency.sum()
    pi = np.ravel(contingency.sum(axis=1))
    pj = np.ravel(contingency.sum(axis=0))
    log2_contingency_nm = np.log2(nz_val)
    contingency_nm = nz_val / contingency_sum
    # Don't need to calculate the full outer product, just for non-zeroes
    outer = pi.take(nzx) * pj.take(nzy)
    log2_outer = -np.log2(outer) + log2(pi.sum()) + log2(pj.sum())
    mi = (contingency_nm * (log2_contingency_nm - log2(contingency_sum)) +
          contingency_nm * log2_outer)
    return mi.sum()
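# Comment-only note: this variant measures MI in bits (log base 2), whereas
# sklearn.metrics.mutual_info_score uses natural logarithms (nats), so the two
# differ by a factor of ln(2) ≈ 0.693:
#
#   from sklearn.metrics import mutual_info_score as sk_mi
#   import numpy as np
#   a, b = [0, 0, 1, 1], [0, 0, 1, 2]
#   sk_mi(a, b) / np.log(2)   # equals the bits-based value returned above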
def normalized_mutual_info_score(labels_true, labels_pred):
    """Normalized Mutual Information between two clusterings.

    Normalized Mutual Information (NMI) is a normalization of the Mutual
    Information (MI) score to scale the results between 0 (no mutual
    information) and 1 (perfect correlation). In this function, mutual
    information is normalized by ``sqrt(H(labels_true) * H(labels_pred))``.

    This measure is not adjusted for chance. Therefore
    :func:`adjusted_mutual_info_score` might be preferred.

    This metric is independent of the absolute values of the labels:
    a permutation of the class or cluster label values won't change the
    score value in any way.

    This metric is furthermore symmetric: switching ``labels_true`` with
    ``labels_pred`` will return the same score value. This can be useful to
    measure the agreement of two independent label assignment strategies
    on the same dataset when the real ground truth is not known.

    Read more in the :ref:`User Guide <mutual_info_score>`.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        A clustering of the data into disjoint subsets.

    labels_pred : array, shape = [n_samples]
        A clustering of the data into disjoint subsets.

    Returns
    -------
    nmi : float
       Score between 0.0 and 1.0. 1.0 stands for a perfectly complete
       labeling.

    See also
    --------
    adjusted_rand_score: Adjusted Rand Index
    adjusted_mutual_info_score: Adjusted Mutual Information (adjusted
        against chance)

    Examples
    --------
    Perfect labelings are both homogeneous and complete, hence have
    score 1.0::

      >>> from sklearn.metrics.cluster import normalized_mutual_info_score
      >>> normalized_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
      1.0
      >>> normalized_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
      1.0

    If class members are completely split across different clusters, the
    assignment is totally incomplete, hence the NMI is null::

      >>> normalized_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
      0.0
    """
    labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
    classes = np.unique(labels_true)
    clusters = np.unique(labels_pred)
    # Special limit cases: no clustering since the data is not split.
    # This is a perfect match hence return 1.0.
    if (classes.shape[0] == clusters.shape[0] == 1 or
            classes.shape[0] == clusters.shape[0] == 0):
        return 1.0

    contingency = contingency_matrix(labels_true, labels_pred, sparse=True)
    contingency = contingency.astype(np.float64)
    # Calculate the MI for the two clusterings
    mi = mutual_info_score(labels_true, labels_pred, contingency=contingency)
    # Calculate entropy for each labeling
    h_true, h_pred = entropy(labels_true), entropy(labels_pred)
    nmi = mi / max(np.sqrt(h_true * h_pred), 1e-10)
    return nmi
def ari_ci(clusters, trueclusters, alpha=0.05):
    '''
    Steinley, Douglas, Michael J. Brusco, and Lawrence Hubert.
    "The variance of the adjusted Rand index."
    Psychological Methods 21.2 (2016): 261.

    Parameters
    ----------
    clusters : array, shape = [n_samples]
        Cluster labels to evaluate
    trueclusters : int array, shape = [n_samples]
        Ground truth class labels to be used as a reference
    alpha : float
        Alpha level for the normal approximation

    Returns
    -------
    ari : float
       Adjusted Rand Index between -1.0 and 1.0.
    ari_variance : float
       Variance of the ARI
    lowerci : float
       CI lower limit
    upperci : float
       CI upper limit
    '''
    # np.float is removed in recent numpy; use the builtin float dtype
    ct = contingency_matrix(clusters, trueclusters).astype(float)
    N = ct.sum()

    # precompute some vars
    ncomb = _comb2(N)
    ctsqsum = (ct ** 2).sum()
    rssq = sum(ct.sum(1) ** 2)  # row sums squared
    cssq = sum(ct.sum(0) ** 2)  # column sums squared

    # use same var names as in Steinley et al.
    a = (ctsqsum - N) / 2.0
    b = (rssq - ctsqsum) / 2.0
    c = (cssq - ctsqsum) / 2.0
    d = (ctsqsum + N ** 2 - rssq - cssq) / 2.0
    e = 2.0 * rssq - (N + 1.0) * N
    f = 2.0 * cssq - (N + 1.0) * N
    g = 4.0 * sum(ct.sum(1) ** 3) - 4 * (N + 1.0) * rssq + (N + 1.0) ** 2 * N
    h = N * (N - 1.0)
    i = 4.0 * sum(ct.sum(0) ** 3) - 4.0 * (N + 1.0) * cssq + (N + 1.0) ** 2 * N

    var_aplusd = (1.0 / 16.0 * (2.0 * N * (N - 1.0) -
                                ((e * f) / (N * (N - 1.0))) ** 2 +
                                (4.0 * (g - h) * (i - h)) /
                                (N * (N - 1.0) * (N - 2.0))) +
                  1.0 / 16.0 * (((e ** 2 - 4 * g + 2 * h) *
                                 (f ** 2 - 4.0 * i + 2.0 * h)) /
                                (N * (N - 1.0) * (N - 2.0) * (N - 3.0))))

    ari_variance = ((ncomb ** 2 * var_aplusd) /
                    ((ncomb ** 2 -
                      ((a + b) * (a + c) + (b + d) * (c + d))) ** 2))
    ari_std = np.sqrt(ari_variance)

    sum_comb_c = sum(_comb2(n_c) for n_c in np.ravel(ct.sum(axis=1)))
    sum_comb_k = sum(_comb2(n_k) for n_k in np.ravel(ct.sum(axis=0)))
    sum_comb = sum(_comb2(n_ij) for n_ij in np.ravel(ct))
    prod_comb = (sum_comb_c * sum_comb_k) / ncomb
    mean_comb = (sum_comb_k + sum_comb_c) / 2.

    # compute ARI
    ari = (sum_comb - prod_comb) / (mean_comb - prod_comb)

    # compute CI
    qnrm = norm.ppf(1.0 - alpha / 2.0)
    lowerci = ari - qnrm * ari_std
    upperci = ari + qnrm * ari_std

    return ari, ari_variance, lowerci, upperci
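# _comb2 and norm are not defined in this snippet. They are assumed to be an
# "n choose 2" helper and scipy's standard normal distribution; a minimal
# sketch of what ari_ci expects:
from scipy.stats import norm

def _comb2(n):
    # number of unordered pairs among n items: n * (n - 1) / 2
    return n * (n - 1) / 2.0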
print(fea_hog.transpose().shape)

# test_gamma = [.5, .55, .6, .65, .7, .75, .8, .85, .9, .95, 1]
# test = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
# for x in test:
gauss_mix = GaussianMixture(n_components=5).fit(fea_hog.transpose())
# print(spec_clust.get_params())

# GaussianMixture has no labels_ attribute; use predict() to get the
# component assignment for each sample
labels_pred = np.array(gauss_mix.predict(fea_hog.transpose()))
# print(labels_pred.shape)

# labels_true : int array with ground truth labels, shape = [n_samples]
# labels_pred : int array with estimated labels, shape = [n_samples]
labels, labels_pred = supervised.check_clusterings(labels, labels_pred)

# value : array of shape [n, n] whose (i, j)-th entry is the number of
# samples in true class i and in predicted class j
value = supervised.contingency_matrix(labels, labels_pred)
[r, c] = linear_sum_assignment(-value)
accr = value[r, c].sum() / len(labels)
print('accr: ' + str(accr))

# for x in range(0, 20):
#     print(kmeans.labels_[x])
# for x in range(0, 10):
#     print(data['fea_hog_train'][x])