def entropy(y_true, y_pred):
    """SOM class distribution entropy measure.

    Parameters
    ----------
    y_true : array, shape = [n]
        true labels.
    y_pred : array, shape = [n]
        predicted cluster ids.

    Returns
    -------
    entropy : float (lower is better)

    References
    ----------
    Elend, L., & Kramer, O. (2019). Self-Organizing Maps with Convolutional
    Layers.
    """
    y_true = y_true.astype(np.int64)
    y_pred = y_pred.astype(np.int64)
    check_clusterings(y_true, y_pred)
    w = _contingency_matrix(y_true, y_pred)
    # relative frequencies of the majority class in each map unit
    freqs = np.divide(w.max(axis=0) + 1e-12, w.sum(axis=0) + 1e-12)
    return np.sum(-np.log(freqs))
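# Several snippets in this collection (entropy above, purity,
# clustering_accuracy, class_scatter_index) rely on a private
# `_contingency_matrix(y_true, y_pred)` helper that is not shown. A minimal
# sketch of what they appear to assume -- a dense count matrix with true
# classes as rows and predicted clusters as columns, for non-negative integer
# labels. This reconstruction is an assumption, not the original helper:
def _contingency_matrix(y_true, y_pred):
    w = np.zeros((y_true.max() + 1, y_pred.max() + 1), dtype=np.int64)
    np.add.at(w, (y_true, y_pred), 1)  # w[c, k] = #samples of class c in cluster k
    return w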
def g_score(y_true, y_pred, eps=None, sparse=False):
    """G-score.

    Calculates the G-score: the geometric mean of the per-class true rates,
    ``prod(true_rates) ** (1 / n_classes)``.

    Parameters
    ----------
    y_true : array-like, shape = [n_samples]
        Ground truth (correct) target values.
    y_pred : array-like, shape = [n_samples]
        Estimated targets as returned by a classifier.
    eps : None or float, optional
        If a float, that value is added to all values in the contingency
        matrix. This helps to stop NaN propagation. If ``None``, nothing is
        adjusted.
    sparse : boolean, optional
        If True, return a sparse CSR contingency matrix. If ``eps is not
        None`` and ``sparse is True``, a ValueError is raised.

    Returns
    -------
    g : float
        G-score.
    """
    y_true, y_pred = check_clusterings(y_true, y_pred)
    c = contingency_matrix(y_true, y_pred, eps=eps, sparse=sparse)
    d = c.diagonal()
    # per-class true rate: diagonal count over the row total
    true_rates = d / np.ravel(c.sum(axis=1))
    g = np.prod(true_rates) ** (1.0 / c.shape[0])
    return g
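# A minimal usage sketch for g_score, assuming numpy and the contingency
# helpers imported above. Cluster ids must be aligned with class ids, since
# the diagonal of the contingency matrix is read as the per-class hits:
y_true = np.array([0, 0, 1, 1, 2, 2])
y_pred = np.array([0, 0, 1, 2, 2, 2])
# class 0: 2/2, class 1: 1/2, class 2: 2/2 -> (1 * 0.5 * 1) ** (1/3)
print(g_score(y_true, y_pred))  # ~0.7937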
def rand_index_score(labels_true, labels_pred):
    # check clusterings
    labels_true, labels_pred = check_clusterings(labels_true, labels_pred)

    # initial statistics calculations
    n_samples = labels_true.shape[0]
    n_samples_comb = comb(n_samples, 2)
    n_classes = np.unique(labels_true).shape[0]
    n_clusters = np.unique(labels_pred).shape[0]
    class_freq = np.bincount(labels_true)
    cluster_freq = np.bincount(labels_pred)

    # Special limit cases: no clustering since the data is not split;
    # or trivial clustering where each document is assigned a unique cluster.
    # These are perfect matches hence return 1.0.
    if (n_classes == n_clusters == 1 or n_classes == n_clusters == 0
            or n_classes == n_clusters == n_samples):
        return 1.0

    # Compute the RI from sums of squares over the contingency data:
    # RI = 1 + (sum_ij n_ij^2 - 0.5 * sum_i a_i^2 - 0.5 * sum_j b_j^2) / C(n, 2)
    contingency = contingency_matrix(labels_true, labels_pred, sparse=True)
    sum_comb_c = sum(n_c ** 2 for n_c in cluster_freq)
    sum_comb_k = sum(n_k ** 2 for n_k in class_freq)
    sum_comb = sum(n_ij ** 2 for n_ij in contingency.data)
    return 1 + (sum_comb - 0.5 * sum_comb_k - 0.5 * sum_comb_c) / n_samples_comb
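# Sanity-check sketch for rand_index_score against direct pair counting
# (O(n^2), fine for toy inputs); `itertools.combinations` is standard library:
from itertools import combinations

def _rand_index_bruteforce(a, b):
    pairs = list(combinations(range(len(a)), 2))
    agree = sum((a[i] == a[j]) == (b[i] == b[j]) for i, j in pairs)
    return agree / len(pairs)

a = np.array([0, 0, 1, 1, 2])
b = np.array([0, 0, 1, 2, 2])
assert np.isclose(rand_index_score(a, b), _rand_index_bruteforce(a, b))  # 0.8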
def rand_index_score(labels_true, labels_pred):
    labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
    n_samples = labels_true.shape[0]
    n_classes = np.unique(labels_true).shape[0]
    n_clusters = np.unique(labels_pred).shape[0]

    # Special limit cases: no clustering since the data is not split;
    # or trivial clustering where each document is assigned a unique cluster.
    # These are perfect matches hence return 1.0.
    if (n_classes == n_clusters == 1 or n_classes == n_clusters == 0
            or n_classes == n_clusters == n_samples):
        return 1.0

    # Compute the RI using the contingency data
    contingency = contingency_matrix(labels_true, labels_pred)
    n = contingency.sum()
    t1 = comb(n, 2)
    t2 = np.sum(np.power(contingency, 2))
    nis = np.sum(np.power(np.sum(contingency, 0), 2))
    njs = np.sum(np.power(np.sum(contingency, 1), 2))
    t3 = 0.5 * (nis + njs)
    # A = number of agreeing pairs; equivalent to the identity used in the
    # previous implementation, RI = 1 + (t2 - t3) / t1
    A = t1 + t2 - t3
    return A / t1
def mutual_info_score(labels_true, labels_pred, contingency=None):
    if contingency is None:
        labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
        contingency = contingency_matrix(labels_true, labels_pred,
                                         sparse=True)
    else:
        contingency = check_array(contingency,
                                  accept_sparse=['csr', 'csc', 'coo'],
                                  dtype=[int, np.int32, np.int64])

    if isinstance(contingency, np.ndarray):
        nzx, nzy = np.nonzero(contingency)
        nz_val = contingency[nzx, nzy]
    elif sp.issparse(contingency):
        nzx, nzy, nz_val = sp.find(contingency)
    else:
        raise ValueError("Unsupported type for 'contingency': %s" %
                         type(contingency))

    contingency_sum = contingency.sum()
    pi = np.ravel(contingency.sum(axis=1))
    pj = np.ravel(contingency.sum(axis=0))
    # base-2 logs throughout, so the score is in bits (`log2` is math.log2)
    log_contingency_nm = np.log2(nz_val)
    contingency_nm = nz_val / contingency_sum
    # only the outer product at the non-zero entries is needed
    outer = pi.take(nzx) * pj.take(nzy)
    log_outer = -np.log2(outer) + log2(pi.sum()) + log2(pj.sum())
    mi = (contingency_nm * (log_contingency_nm - log2(contingency_sum)) +
          contingency_nm * log_outer)
    return mi.sum()
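# This variant measures MI in bits (base-2 logs), whereas scikit-learn's own
# sklearn.metrics.mutual_info_score uses natural logs (nats). A quick
# consistency sketch, assuming both are importable:
import sklearn.metrics
a = np.array([0, 0, 1, 1])
b = np.array([0, 1, 1, 1])
assert np.isclose(mutual_info_score(a, b),
                  sklearn.metrics.mutual_info_score(a, b) / np.log(2))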
def con_mutual_infor_score(x, y, z):
    """Compute the conditional mutual information I(X; Y | Z)."""
    label_x, label_y = sd.check_clusterings(x, y)
    label_z = z
    # merge the three label arrays into one frame
    data_union = pd.DataFrame({
        "x_col": label_x,
        "y_col": label_y,
        "z_col": label_z
    })
    # data_N : number of samples
    # x_N / y_N / z_N : number of distinct values of x / y / z
    # class_x / class_y / class_z : the distinct values themselves,
    #     e.g. numpy.ndarray ['a1' 'a2']
    # Lap_smo_factor : Laplace smoothing factor
    data_N = len(label_x)
    class_x = np.unique(label_x)
    class_y = np.unique(label_y)
    class_z = np.unique(label_z)
    x_N = len(class_x)
    y_N = len(class_y)
    z_N = len(class_z)
    xyz_N = x_N * y_N * z_N
    xz_N = x_N * z_N
    yz_N = y_N * z_N
    con_mu_infor_score = 0
    Lap_smo_factor = 1
    base = 2
    # loop variables are renamed vx/vy/vz so they do not shadow the parameters
    for vz in class_z:
        for vx in class_x:
            for vy in class_y:
                data_xyz = data_union[(data_union["y_col"] == vy)
                                      & (data_union["x_col"] == vx)
                                      & (data_union["z_col"] == vz)]
                data_xz = data_union[(data_union["x_col"] == vx)
                                     & (data_union["z_col"] == vz)]
                data_yz = data_union[(data_union["y_col"] == vy)
                                     & (data_union["z_col"] == vz)]
                data_z = data_union[(data_union["z_col"] == vz)]
                xyz_number = len(data_xyz)
                xz_number = len(data_xz)
                yz_number = len(data_yz)
                z_number = len(data_z)
                # Laplace-smoothed joint and marginal probabilities
                p_xyz = (xyz_number + Lap_smo_factor) / (data_N + xyz_N)
                p_xz = (xz_number + Lap_smo_factor) / (data_N + xz_N)
                p_yz = (yz_number + Lap_smo_factor) / (data_N + yz_N)
                p_z = (z_number + Lap_smo_factor) / (data_N + z_N)
                con_mu_infor_score = con_mu_infor_score + p_xyz * log(
                    (p_xyz * p_z) / (p_xz * p_yz), base)
    return con_mu_infor_score
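# Toy sketch: with X independent of Y given Z, the smoothed estimate of
# I(X; Y | Z) should be (near) zero. Assumes numpy, pandas and math.log are
# imported as in the function above:
x = np.array([0, 0, 1, 1, 0, 0, 1, 1])
y = np.array([0, 1, 0, 1, 0, 1, 0, 1])
z = np.array([0, 0, 0, 0, 1, 1, 1, 1])
print(con_mutual_infor_score(x, y, z))  # exactly 0 here: every ratio is 1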
def clustering_accuracy(labels_true, labels_pred):
    """Compute clustering accuracy."""
    from sklearn.metrics.cluster import supervised
    from scipy.optimize import linear_sum_assignment
    labels_true, labels_pred = supervised.check_clusterings(labels_true,
                                                            labels_pred)
    value = supervised.contingency_matrix(labels_true, labels_pred)
    [r, c] = linear_sum_assignment(-value)
    return value[r, c].sum() / len(labels_true)
def mutual_information(label_x, label_y):
    """Compute the mutual information I(X; Y) with Laplace smoothing.

    Parameters
    ----------
    label_x : int array, shape = [n_samples]
        A clustering of the data into disjoint subsets.
    label_y : int array, shape = [n_samples]
        A clustering of the data into disjoint subsets.
    """
    labels_true, labels_pred = sd.check_clusterings(label_x, label_y)
    # merge labels_true and labels_pred into one frame
    data_union = pd.DataFrame({"x_col": labels_true, "y_col": labels_pred})
    # data_N : number of samples
    # x_N / y_N : number of distinct values of x / y
    # class_x / class_y : the distinct values themselves
    # Lap_smo_factor : Laplace smoothing factor
    data_N = len(labels_true)
    class_x = np.unique(labels_true)
    class_y = np.unique(labels_pred)
    x_N = len(class_x)
    y_N = len(class_y)
    xy_N = x_N * y_N
    mi = 0
    Lap_smo_factor = 1
    base = 2
    for x in class_x:
        for y in class_y:
            data_xy = data_union[(data_union["y_col"] == y)
                                 & (data_union["x_col"] == x)]
            xy_number = len(data_xy)
            p_xy = (xy_number + Lap_smo_factor) / (data_N + xy_N)
            data_y = data_union[data_union["y_col"] == y]
            data_x = data_union[data_union["x_col"] == x]
            y_number = len(data_y)
            x_number = len(data_x)
            p_y = (y_number + Lap_smo_factor) / (data_N + y_N)
            p_x = (x_number + Lap_smo_factor) / (data_N + x_N)
            # pointwise mutual information term, weighted by p(x, y)
            mi = mi + p_xy * log(p_xy / (p_y * p_x), base)
    return mi
def variation_of_information_score(labels_true, labels_pred):
    """Variation of Information (Meila, 2003)."""
    labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
    mutual = mutual_info_score(labels_true, labels_pred)
    e1 = entropy(labels_true)
    e2 = entropy(labels_pred)
    return e1 + e2 - (2 * mutual)
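# Minimal sketch: identical clusterings have VI = 0, and VI grows as the
# partitions diverge. Assumes the one-argument `entropy` and the
# `mutual_info_score` used here are the natural-log (sklearn-style) versions,
# so the result is in nats:
a = np.array([0, 0, 1, 1])
print(variation_of_information_score(a, a))                       # 0.0
print(variation_of_information_score(a, np.array([0, 1, 0, 1])))  # 2*ln(2)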
def purity(y_true, y_pred):
    """Clustering purity.

    Parameters
    ----------
    y_true : array, shape = [n]
        true labels.
    y_pred : array, shape = [n]
        predicted cluster ids.

    Returns
    -------
    purity : float in [0,1] (higher is better)
    """
    y_true = y_true.astype(np.int64)
    y_pred = y_pred.astype(np.int64)
    check_clusterings(y_true, y_pred)
    w = _contingency_matrix(y_true, y_pred)
    # map each cluster to its majority true class, then score as accuracy
    label_mapping = w.argmax(axis=0)
    y_pred_voted = np.array([label_mapping[y] for y in y_pred])
    return accuracy_score(y_true, y_pred_voted)
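# Usage sketch for purity; cluster ids need not match class ids, since each
# cluster is first mapped to its majority class:
y_true = np.array([0, 0, 0, 1, 1, 1])
y_pred = np.array([2, 2, 0, 0, 0, 0])
# cluster 2 -> class 0 (2/2), cluster 0 -> class 1 (3/4): purity = 5/6
print(purity(y_true, y_pred))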
def normalized_mutual_info_score(labels_true, labels_pred):
    labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
    classes = np.unique(labels_true)
    clusters = np.unique(labels_pred)
    if (classes.shape[0] == clusters.shape[0] == 1
            or classes.shape[0] == clusters.shape[0] == 0):
        return 1.0
    contingency = contingency_matrix(labels_true, labels_pred, sparse=True)
    contingency = contingency.astype(np.float64)
    mi = mutual_info_score(labels_true, labels_pred, contingency=contingency)
    h_true, h_pred = entropy(labels_true), entropy(labels_pred)
    nmi = mi / max(np.sqrt(h_true * h_pred), 1e-10)
    return nmi
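# The VI/NMI/AMI snippets here call a one-argument `entropy(labels)` helper
# that is not shown. A minimal sketch of the sklearn-style label entropy they
# appear to assume (natural log); named differently here only to avoid
# clashing with the two-argument SOM `entropy` above. This reconstruction is
# an assumption, not the original helper:
def entropy_of_labels(labels):
    if len(labels) == 0:
        return 1.0
    _, label_idx = np.unique(labels, return_inverse=True)
    pi = np.bincount(label_idx).astype(np.float64)
    pi = pi[pi > 0]
    # -sum p * log p, written to stay numerically stable for large counts
    return -np.sum((pi / pi.sum()) * (np.log(pi) - np.log(pi.sum())))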
def clustering_accuracy(y_true, y_pred):
    """Unsupervised clustering accuracy.

    Can only be used if the number of target classes in y_true is equal to
    the number of clusters in y_pred.

    Parameters
    ----------
    y_true : array, shape = [n]
        true labels.
    y_pred : array, shape = [n]
        predicted cluster ids.

    Returns
    -------
    accuracy : float in [0,1] (higher is better)
    """
    y_true = y_true.astype(np.int64)
    y_pred = y_pred.astype(np.int64)
    check_clusterings(y_true, y_pred)
    w = _contingency_matrix(y_true, y_pred).T
    # Hungarian algorithm on the cost matrix; scipy.optimize's
    # linear_sum_assignment replaces the long-removed
    # sklearn.utils.linear_assignment_ used in the original snippet.
    row_ind, col_ind = linear_sum_assignment(w.max() - w)
    return w[row_ind, col_ind].sum() / y_true.size
def mutual_infor_score(label_x, label_y):
    """Compute the mutual information I(X; Y) as H(X) - H(X|Y).

    Parameters
    ----------
    label_x : int array, shape = [n_samples]
        A clustering of the data into disjoint subsets.
    label_y : int array, shape = [n_samples]
        A clustering of the data into disjoint subsets.
    """
    labels_true, labels_pred = sd.check_clusterings(label_x, label_y)
    # Merge labels_true and labels_pred
    data_union = pd.DataFrame({"x_col": labels_true, "y_col": labels_pred})
    # compute H(X)
    ENT = entropy(labels_true)
    # data_N : number of samples
    # x_N / y_N : number of distinct values of x / y
    # class_x / class_y : the distinct values themselves
    # ENT_X_Y : conditional entropy H(X|Y)
    # Lap_smo_factor : Laplace smoothing factor
    data_N = len(labels_true)
    class_x = np.unique(labels_true)
    class_y = np.unique(labels_pred)
    x_N = len(class_x)
    y_N = len(class_y)
    ENT_X_Y = 0
    Lap_smo_factor = 1
    for y in class_y:
        dataXy = data_union[data_union["y_col"] == y]
        len_y = len(dataXy)
        data_x_col = np.asanyarray(dataXy["x_col"])
        entXy = entropy(data_x_col)
        # Laplace-smoothed p(y)
        p_y = (len_y + Lap_smo_factor) / (data_N + y_N)
        ENT_X_Y = ENT_X_Y + entXy * p_y
    mi = ENT - ENT_X_Y
    return mi
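# Quick sketch: H(X) - H(X|Y) recovers I(X; Y); with identical labelings
# H(X|Y) = 0, so the score equals the label entropy H(X). Note the
# Laplace-smoothed p(y) makes the estimate only approximate on small,
# non-degenerate samples:
a = np.array([0, 0, 1, 1, 0, 1])
print(mutual_infor_score(a, a))  # == entropy(a), since H(X|X) = 0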
def class_scatter_index(dist_fun, y_true, y_pred):
    """Class scatter index (CSI).

    Parameters
    ----------
    dist_fun : function (k : int, l : int) => int
        distance function between units k and l on the map.
    y_true : array, shape = [n]
        true labels.
    y_pred : array, shape = [n]
        predicted cluster ids.

    Returns
    -------
    csi : float (lower is better)

    References
    ----------
    Elend, L., & Kramer, O. (2019). Self-Organizing Maps with Convolutional
    Layers.
    """
    y_true = y_true.astype(np.int64)
    y_pred = y_pred.astype(np.int64)
    check_clusterings(y_true, y_pred)
    n_classes = y_true.max() + 1
    n_units = y_pred.max() + 1
    w = _contingency_matrix(y_true, y_pred)
    groups = np.zeros(n_classes, dtype=np.int64)
    for c in range(n_classes):
        # adjacency between the map units occupied by class c
        connectivity = csr_matrix([[1 if dist_fun(k, l) == 1 else 0
                                    for l in range(n_units) if w[c, l] > 0]
                                   for k in range(n_units) if w[c, k] > 0])
        # number of connected groups of units for this class
        groups[c] = connected_components(csgraph=connectivity,
                                         directed=False,
                                         return_labels=False)
    return np.mean(groups)
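# Usage sketch for class_scatter_index with a 1-D map, where units k and l
# are neighbours iff their indices differ by exactly 1:
dist_1d = lambda k, l: abs(k - l)
y_true = np.array([0, 0, 0, 1, 1])
y_pred = np.array([0, 1, 3, 2, 2])  # class 0 occupies units {0, 1, 3}
# class 0 splits into groups {0, 1} and {3}; class 1 is one group {2}
print(class_scatter_index(dist_1d, y_true, y_pred))  # (2 + 1) / 2 = 1.5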
def adjusted_mutual_info_score(labels_true, labels_pred):
    labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
    n_samples = labels_true.shape[0]
    classes = np.unique(labels_true)
    clusters = np.unique(labels_pred)
    if (classes.shape[0] == clusters.shape[0] == 1
            or classes.shape[0] == clusters.shape[0] == 0):
        return 1.0
    contingency = contingency_matrix(labels_true, labels_pred, sparse=True)
    contingency = contingency.astype(np.float64)
    # Calculate the MI for the two clusterings
    mi = mutual_info_score(labels_true, labels_pred, contingency=contingency)
    # Calculate the expected value for the mutual information
    emi = expected_mutual_information(contingency, n_samples)
    # Calculate entropy for each labeling
    h_true, h_pred = entropy(labels_true), entropy(labels_pred)
    ami = (mi - emi) / (max(h_true, h_pred) - emi)
    return ami
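# Sanity sketch: unlike NMI, AMI is adjusted for chance, so independent random
# labelings score near 0. Cross-check against scikit-learn's implementation,
# assuming the `expected_mutual_information` helper above is available (this
# snippet uses max-normalization, sklearn's historical default, so the two
# values agree only approximately on newer sklearn versions):
import sklearn.metrics
rng = np.random.RandomState(0)
a = rng.randint(0, 3, size=200)
b = rng.randint(0, 3, size=200)
print(adjusted_mutual_info_score(a, b))                  # ~0
print(sklearn.metrics.adjusted_mutual_info_score(a, b))  # ~0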
def clustering_accuracy(labels_true, labels_pred):
    r"""Clustering Accuracy between two clusterings.

    Clustering Accuracy is a measure of the similarity between two labels of
    the same data. Assume that both labels_true and labels_pred contain n
    distinct labels. Clustering Accuracy is the maximum accuracy over all
    possible permutations of the labels, i.e.

    .. math::

        \max_{\sigma} \sum_i labels\_true[i] == \sigma(labels\_pred[i])

    where :math:`\sigma` is a mapping from the set of unique labels of
    labels_pred to the set of unique labels of labels_true. Clustering
    accuracy is one if and only if there is a permutation of the labels such
    that there is an exact match.

    This metric is independent of the absolute values of the labels:
    a permutation of the class or cluster label values won't change the
    score value in any way.

    This metric is furthermore symmetric: switching ``labels_true`` with
    ``labels_pred`` will return the same score value. This can be useful to
    measure the agreement of two independent label assignment strategies on
    the same dataset when the real ground truth is not known.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        A clustering of the data into disjoint subsets.

    labels_pred : array, shape = [n_samples]
        A clustering of the data into disjoint subsets.

    Returns
    -------
    accuracy : float
        Clustering accuracy in the range [0, 1].
    """
    labels_true, labels_pred = supervised.check_clusterings(
        labels_true, labels_pred)
    value = supervised.contingency_matrix(labels_true, labels_pred)
    [r, c] = linear_sum_assignment(-value)
    return value[r, c].sum() / len(labels_true)
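# Usage sketch, assuming the imports used above: the score is invariant to a
# relabeling of the clusters.
a = np.array([0, 0, 1, 1, 2, 2])
b = np.array([1, 1, 2, 2, 0, 0])   # same partition, permuted ids
print(clustering_accuracy(a, b))   # 1.0
print(clustering_accuracy(a, np.array([1, 1, 2, 0, 0, 0])))  # 5/6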
def folkes_mallow_score(labels_true, labels_pred):
    """Fowlkes-Mallows score, computed according to:

    Ceccarelli, M. & Maratea, A., "A Fuzzy Extension of Some Classical
    Concordance Measures and an Efficient Algorithm for Their Computation",
    Knowledge-Based Intelligent Information and Engineering Systems,
    Springer Berlin Heidelberg, 2008, 5179, 755-763.

    :param labels_true:
    :param labels_pred:
    :return:
    """
    labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
    n_samples = labels_true.shape[0]
    contingency = contingency_matrix(labels_true, labels_pred)
    cc = np.sum(contingency * contingency)
    N11 = cc - n_samples           # pairs clustered together in both
    c1 = contingency.sum(axis=1)
    N01 = np.sum(c1 * c1) - cc     # pairs together in truth only
    c2 = contingency.sum(axis=0)
    N10 = np.sum(c2 * c2) - cc     # pairs together in prediction only
    return (N11 * 1.0) / np.sqrt((N11 + N01) * (N11 + N10))
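# Cross-check sketch against scikit-learn's pair-counting implementation;
# the two should agree up to floating-point error:
import sklearn.metrics
a = np.array([0, 0, 1, 1, 2])
b = np.array([0, 0, 1, 2, 2])
assert np.isclose(folkes_mallow_score(a, b),
                  sklearn.metrics.fowlkes_mallows_score(a, b))  # 0.5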
def paired_precision_recall_fscore(labels_true, labels_pred):
    """Compute the pairwise variant of precision, recall and F-score.

    Precision is the ability not to label as positive a sample that is
    negative. The best value is 1 and the worst is 0.

    Recall is the ability to successfully find all the positive samples.
    The best value is 1 and the worst is 0.

    The F-score can be thought of as a weighted harmonic mean of precision
    and recall, reaching its best value at 1 and its worst at 0.

    Parameters
    ----------
    :param labels_true: 1d array containing the ground truth cluster labels.
    :param labels_pred: 1d array containing the predicted cluster labels.

    Returns
    -------
    :return float precision: calculated precision
    :return float recall: calculated recall
    :return float f_score: calculated f_score

    Reference
    ---------
    Levin, Michael et al., "Citation-based bootstrapping for large-scale
    author disambiguation", Journal of the American Society for Information
    Science and Technology 63.5 (2012): 1030-1047.
    """
    # Check that labels_* are 1d arrays and have the same size
    labels_true, labels_pred = check_clusterings(labels_true, labels_pred)

    # Check that the input given is not the empty set
    if labels_true.shape == (0, ):
        raise ValueError("input labels must not be empty.")

    # Assign each sample to its own cluster
    default_clustering = range(len(labels_pred))

    # Calculate precision
    numerator = _general_merge_distance(labels_true, labels_pred,
                                        fm=_zero, fs=mul)
    denominator = _general_merge_distance(default_clustering, labels_pred,
                                          fm=_zero, fs=mul)
    try:
        precision = 1.0 - numerator / denominator
    except ZeroDivisionError:
        precision = 1.0

    # Calculate recall
    numerator = _general_merge_distance(labels_true, labels_pred,
                                        fm=mul, fs=_zero)
    denominator = _general_merge_distance(labels_true, default_clustering,
                                          fm=mul, fs=_zero)
    try:
        recall = 1.0 - numerator / denominator
    except ZeroDivisionError:
        recall = 1.0

    # Calculate f_score; if both are zero (minimum score), f_score is zero
    if precision + recall == 0.0:
        f_score = 0.0
    else:
        f_score = 2.0 * precision * recall / (precision + recall)

    return precision, recall, f_score
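# `_general_merge_distance`, `_zero` and `mul` are external helpers (a
# generalized merge distance plus operator.mul) and are not shown here. For
# cross-checking, pairwise precision and recall can also be computed directly
# from pair counts over the contingency matrix -- a minimal independent
# sketch:
from scipy.special import comb

def _paired_pr_from_counts(labels_true, labels_pred):
    c = contingency_matrix(labels_true, labels_pred)
    together_both = comb(c, 2).sum()              # pairs together in both
    together_pred = comb(c.sum(axis=0), 2).sum()  # pairs together in prediction
    together_true = comb(c.sum(axis=1), 2).sum()  # pairs together in truth
    return together_both / together_pred, together_both / together_true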
def normalized_mutual_info_score(labels_true, labels_pred):
    """Normalized Mutual Information between two clusterings.

    Normalized Mutual Information (NMI) is a normalization of the Mutual
    Information (MI) score to scale the results between 0 (no mutual
    information) and 1 (perfect correlation). In this function, mutual
    information is normalized by ``sqrt(H(labels_true) * H(labels_pred))``.

    This measure is not adjusted for chance. Therefore
    :func:`adjusted_mutual_info_score` might be preferred.

    This metric is independent of the absolute values of the labels:
    a permutation of the class or cluster label values won't change the
    score value in any way.

    This metric is furthermore symmetric: switching ``label_true`` with
    ``label_pred`` will return the same score value. This can be useful to
    measure the agreement of two independent label assignment strategies on
    the same dataset when the real ground truth is not known.

    Read more in the :ref:`User Guide <mutual_info_score>`.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        A clustering of the data into disjoint subsets.

    labels_pred : array, shape = [n_samples]
        A clustering of the data into disjoint subsets.

    Returns
    -------
    nmi : float
        score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling

    See also
    --------
    adjusted_rand_score: Adjusted Rand Index
    adjusted_mutual_info_score: Adjusted Mutual Information (adjusted
        against chance)

    Examples
    --------
    Perfect labelings are both homogeneous and complete, hence have
    score 1.0::

      >>> from sklearn.metrics.cluster import normalized_mutual_info_score
      >>> normalized_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
      1.0
      >>> normalized_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
      1.0

    If classes members are completely split across different clusters,
    the assignment is totally incomplete, hence the NMI is null::

      >>> normalized_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
      0.0
    """
    labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
    classes = np.unique(labels_true)
    clusters = np.unique(labels_pred)
    # Special limit cases: no clustering since the data is not split.
    # This is a perfect match hence return 1.0.
    if (classes.shape[0] == clusters.shape[0] == 1
            or classes.shape[0] == clusters.shape[0] == 0):
        return 1.0
    contingency = contingency_matrix(labels_true, labels_pred, sparse=True)
    contingency = contingency.astype(np.float64)
    # Calculate the MI for the two clusterings
    mi = mutual_info_score(labels_true, labels_pred, contingency=contingency)
    # Calculate entropy for each labeling
    h_true, h_pred = entropy(labels_true), entropy(labels_pred)
    nmi = mi / max(np.sqrt(h_true * h_pred), 1e-10)
    return nmi
def compute_clustering_statistics(X, labels_true, labels_pred):
    """Modified b3_precision_recall_fscore function from bear which also
    returns the wrongly classified samples.

    Parameters
    ----------
    :param X: array of the signatures to validate predictions
    :param labels_true: 1d array containing the ground truth cluster labels.
    :param labels_pred: 1d array containing the predicted cluster labels.

    Returns
    -------
    :return float precision: calculated precision
    :return float recall: calculated recall
    :return float f_score: calculated f_score
    :return list(signature_uuid): list of signature uuids which were
        wrongly classified

    Reference
    ---------
    Amigo, Enrique, et al. "A comparison of extrinsic clustering evaluation
    metrics based on formal constraints." Information retrieval 12.4 (2009):
    461-486.
    """
    # Check that labels_* are 1d arrays and have the same size
    labels_true, labels_pred = check_clusterings(labels_true, labels_pred)

    # Check that the input given is not the empty set
    if labels_true.shape == (0, ):
        raise ValueError("input labels must not be empty.")

    # Compute P/R/F scores
    n_samples = len(labels_true)
    true_clusters = {}  # true cluster_id => set of sample indices
    pred_clusters = {}  # pred cluster_id => set of sample indices

    for i in range(n_samples):
        true_cluster_id = labels_true[i]
        pred_cluster_id = labels_pred[i]
        if true_cluster_id not in true_clusters:
            true_clusters[true_cluster_id] = set()
        if pred_cluster_id not in pred_clusters:
            pred_clusters[pred_cluster_id] = set()
        true_clusters[true_cluster_id].add(i)
        pred_clusters[pred_cluster_id].add(i)

    # Freeze the sets so they can be used as dictionary keys below
    for cluster_id, cluster in true_clusters.items():
        true_clusters[cluster_id] = frozenset(cluster)
    for cluster_id, cluster in pred_clusters.items():
        pred_clusters[cluster_id] = frozenset(cluster)

    precision = 0.0
    recall = 0.0
    intersections = {}
    wrongly_classified_samples = set()

    for i in range(n_samples):
        pred_cluster_i = pred_clusters[labels_pred[i]]
        true_cluster_i = true_clusters[labels_true[i]]

        if (pred_cluster_i, true_cluster_i) in intersections:
            intersection = intersections[(pred_cluster_i, true_cluster_i)]
        else:
            intersection = pred_cluster_i.intersection(true_cluster_i)
            # collect the samples which should be in the cluster but are not,
            # and the samples which should not be in the cluster but are
            wrongly_classified_samples |= true_cluster_i.symmetric_difference(
                pred_cluster_i)
            intersections[(pred_cluster_i, true_cluster_i)] = intersection

        precision += len(intersection) / len(pred_cluster_i)
        recall += len(intersection) / len(true_cluster_i)

    precision /= n_samples
    recall /= n_samples
    f_score = 2 * precision * recall / (precision + recall)

    wrongly_classified_samples = [
        X[sample][0]["signature_uuid"]
        for sample in wrongly_classified_samples
    ]

    return (precision, recall, f_score), wrongly_classified_samples
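# The only use of X is `X[sample][0]["signature_uuid"]`, so each element of X
# is assumed to be a sequence whose first item is a dict carrying a
# "signature_uuid" key. A toy sketch of a compatible input (the field values
# here are hypothetical):
X = [({"signature_uuid": "sig-%d" % i},) for i in range(4)]
labels_true = np.array([0, 0, 1, 1])
labels_pred = np.array([0, 1, 1, 1])
scores, wrong = compute_clustering_statistics(X, labels_true, labels_pred)
print(scores, wrong)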
def mutual_info_score(labels_true, labels_pred, contingency=None):
    r"""Mutual Information between two clusterings.

    The Mutual Information is a measure of the similarity between two labels
    of the same data. Where :math:`|U_i|` is the number of the samples in
    cluster :math:`U_i` and :math:`|V_j|` is the number of the samples in
    cluster :math:`V_j`, the Mutual Information between clusterings
    :math:`U` and :math:`V` is given as:

    .. math::

        MI(U, V) = \sum_{i=1}^{|U|} \sum_{j=1}^{|V|}
            \frac{|U_i \cap V_j|}{N}
            \log_2 \frac{N |U_i \cap V_j|}{|U_i| |V_j|}

    This metric is independent of the absolute values of the labels:
    a permutation of the class or cluster label values won't change the
    score value in any way.

    This metric is furthermore symmetric: switching ``label_true`` with
    ``label_pred`` will return the same score value. This can be useful to
    measure the agreement of two independent label assignment strategies on
    the same dataset when the real ground truth is not known.

    Read more in the :ref:`User Guide <mutual_info_score>`.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        A clustering of the data into disjoint subsets.

    labels_pred : array, shape = [n_samples]
        A clustering of the data into disjoint subsets.

    contingency : {None, array, sparse matrix},
                  shape = [n_classes_true, n_classes_pred]
        A contingency matrix given by the :func:`contingency_matrix`
        function. If value is ``None``, it will be computed, otherwise the
        given value is used, with ``labels_true`` and ``labels_pred``
        ignored.

    Returns
    -------
    mi : float
        Mutual information, a non-negative value (in bits, since base-2
        logarithms are used).

    See also
    --------
    adjusted_mutual_info_score: Adjusted against chance Mutual Information
    normalized_mutual_info_score: Normalized Mutual Information
    """
    if contingency is None:
        labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
        contingency = contingency_matrix(labels_true, labels_pred,
                                         sparse=True)
    else:
        contingency = check_array(contingency,
                                  accept_sparse=['csr', 'csc', 'coo'],
                                  dtype=[int, np.int32, np.int64])

    if isinstance(contingency, np.ndarray):
        # For an array
        nzx, nzy = np.nonzero(contingency)
        nz_val = contingency[nzx, nzy]
    elif sp.issparse(contingency):
        # For a sparse matrix
        nzx, nzy, nz_val = sp.find(contingency)
    else:
        raise ValueError("Unsupported type for 'contingency': %s" %
                         type(contingency))

    contingency_sum = contingency.sum()
    pi = np.ravel(contingency.sum(axis=1))
    pj = np.ravel(contingency.sum(axis=0))
    log2_contingency_nm = np.log2(nz_val)
    contingency_nm = nz_val / contingency_sum
    # Don't need to calculate the full outer product, just for non-zeroes
    outer = pi.take(nzx) * pj.take(nzy)
    log2_outer = -np.log2(outer) + log2(pi.sum()) + log2(pj.sum())
    mi = (contingency_nm * (log2_contingency_nm - log2(contingency_sum)) +
          contingency_nm * log2_outer)
    return mi.sum()
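# Usage sketch: passing a precomputed contingency matrix skips the label
# checks entirely, which is convenient when several metrics share one matrix.
a = np.array([0, 0, 1, 1])
b = np.array([0, 0, 1, 1])
c = contingency_matrix(a, b, sparse=True)
print(mutual_info_score(None, None, contingency=c))  # 1.0 bit
print(mutual_info_score(a, b))                       # same value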
print(fea_hog.transpose().shape)

gauss_mix = GaussianMixture(n_components=5).fit(fea_hog.transpose())
# GaussianMixture has no `labels_` attribute; hard cluster assignments come
# from predict()
labels_pred = gauss_mix.predict(fea_hog.transpose())

labels, labels_pred = supervised.check_clusterings(labels, labels_pred)
# labels : int array with ground truth labels, shape = [n_samples]
# labels_pred : int array with estimated labels, shape = [n_samples]
value = supervised.contingency_matrix(labels, labels_pred)
# value : array whose (i, j)-th entry is the number of samples in true
# class i and in predicted cluster j
[r, c] = linear_sum_assignment(-value)
accr = value[r, c].sum() / len(labels)
print('accr: ' + str(accr))
def b3_precision_recall_fscore(labels_true, labels_pred):
    """Compute the B^3 variant of precision, recall and F-score.

    Parameters
    ----------
    :param labels_true: 1d array containing the ground truth cluster labels.
    :param labels_pred: 1d array containing the predicted cluster labels.

    Returns
    -------
    :return float precision: calculated precision
    :return float recall: calculated recall
    :return float f_score: calculated f_score

    Reference
    ---------
    Amigo, Enrique, et al. "A comparison of extrinsic clustering evaluation
    metrics based on formal constraints." Information retrieval 12.4 (2009):
    461-486.
    """
    # Check that labels_* are 1d arrays and have the same size
    labels_true, labels_pred = check_clusterings(labels_true, labels_pred)

    # Check that the input given is not the empty set
    if labels_true.shape == (0, ):
        raise ValueError("input labels must not be empty.")

    # Compute P/R/F scores
    n_samples = len(labels_true)
    true_clusters = {}  # true cluster_id => set of sample indices
    pred_clusters = {}  # pred cluster_id => set of sample indices

    for i in range(n_samples):
        true_cluster_id = labels_true[i]
        pred_cluster_id = labels_pred[i]
        if true_cluster_id not in true_clusters:
            true_clusters[true_cluster_id] = set()
        if pred_cluster_id not in pred_clusters:
            pred_clusters[pred_cluster_id] = set()
        true_clusters[true_cluster_id].add(i)
        pred_clusters[pred_cluster_id].add(i)

    precision = 0.0
    recall = 0.0

    for i in range(n_samples):
        pred_cluster_i = pred_clusters[labels_pred[i]]
        true_cluster_i = true_clusters[labels_true[i]]
        intersection = pred_cluster_i.intersection(true_cluster_i)
        precision += len(intersection) / len(pred_cluster_i)
        recall += len(intersection) / len(true_cluster_i)

    precision /= n_samples
    recall /= n_samples

    if precision + recall == 0.0:
        f_score = 0.0
    else:
        f_score = 2 * precision * recall / (precision + recall)

    return precision, recall, f_score
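# Usage sketch: B^3 scores are per-sample averages, so unlike pair counting
# they behave sensibly on singleton clusters.
a = np.array([0, 0, 0, 1])
b = np.array([0, 0, 1, 2])
p, r, f = b3_precision_recall_fscore(a, b)
# precision = 1.0 (every predicted cluster is pure);
# recall = (2/3 + 2/3 + 1/3 + 1) / 4 = 2/3
print(p, r, f)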