# --- Libras dataset: evaluate the memetic clustering result against the ground truth,
# --- then compare with plain k-means and k-medoids baselines (15 clusters, 360 samples).
classes = final_dict[key]
randval = newmeme.rand_index_score(test_labels, classes.astype(int))
jacardkmean1 = jaccard_similarity_score(test_labels, classes.astype(int))
print('randval', randval)
print('jacard', jacardkmean1)
print('final', final_dict)

# Note: the iris data loaded here is immediately overwritten by the Libras CSV read below.
result1 = load_iris()
result1 = result1['data']
result1 = np.loadtxt(
    open(r"C:\personal\PhD\Dataset\Anomaly\Libras\real_6.csv", "r"),
    delimiter=",")

# Baseline 1: k-means with 15 clusters.
kmeans1 = KMeans(n_clusters=15, random_state=0).fit(result1)
# print("kmeans without meme", confusion_matrix(test_labels, kmeans1.labels_))
print('kmeans level', kmeans1.labels_)
randkmean1 = newmeme.rand_index_score(test_labels, kmeans1.labels_)
print('rand', randkmean1)
jacardkmean1 = jaccard_similarity_score(test_labels, kmeans1.labels_)
print('jacard', jacardkmean1)
score = obj.calculate_silhoutte("", kmeans1.labels_)
print('score here', score)

# Baseline 2: k-medoids on the Euclidean distance matrix (15 clusters, 360 points).
D1 = pairwise_distances(result1, metric='euclidean')
M1, C1 = kMedoids(D1, 15)
kmediodlabels1 = convertLabelsToList(M1, C1, 360)
print('kmediod level', kmediodlabels1)
randkmediod1 = newmeme.rand_index_score(test_labels, kmediodlabels1)
jacardkmediod1 = jaccard_similarity_score(test_labels, kmediodlabels1)
print('randkmediod1', randkmediod1)
print('jacardkmediod1', jacardkmediod1)
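# Hedged sketch (not part of the original code): convertLabelsToList is not defined in this
# excerpt. Assuming kMedoids returns M (the medoid indices) and C (a dict mapping cluster id
# to an array of member point indices), the helper below is one plausible way to flatten that
# structure into a per-point label array of length n_points. The name
# convert_labels_to_list_sketch and its signature are hypothetical; numpy is assumed to be
# imported as np, as elsewhere in this script.
def convert_labels_to_list_sketch(M, C, n_points):
    labels = np.zeros(n_points, dtype=int)
    for cluster_id, members in C.items():
        # every point assigned to this medoid gets the cluster id as its label
        for point_index in members:
            labels[point_index] = cluster_id
    return labels

# Example (hypothetical): kmediod_labels = convert_labels_to_list_sketch(M1, C1, 360)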
# --- Glass dataset: same evaluation as above with 6 clusters and 214 samples.
classes = final_dict[key]
randval = newmeme.rand_index_score(test_labels, classes.astype(int))
jacardkmean1 = jaccard_similarity_score(test_labels, classes.astype(int))
print('randval', randval)
print('jacard', jacardkmean1)
print('final', final_dict)

result1 = np.loadtxt(
    open(r"C:\personal\PhD\Dataset\sonar-data-set\glass-classification\glass.csv", "r"),
    delimiter=",")

# Baseline 1: k-means with 6 clusters.
kmeans1 = KMeans(n_clusters=6, random_state=0).fit(result1)
# print("kmeans without meme", confusion_matrix(test_labels, kmeans1.labels_))
print('kmeans level', kmeans1.labels_)
randkmean1 = newmeme.rand_index_score(test_labels, kmeans1.labels_)
print('rand', randkmean1)
jacardkmean1 = jaccard_similarity_score(test_labels, kmeans1.labels_)
print('jacard', jacardkmean1)

# Baseline 2: k-medoids on the Euclidean distance matrix (6 clusters, 214 points).
D1 = pairwise_distances(result1, metric='euclidean')
M1, C1 = kMedoids(D1, 6)
kmediodlabels1 = convertLabelsToList(M1, C1, 214)
print('kmediod level', kmediodlabels1)
randkmediod1 = newmeme.rand_index_score(test_labels, kmediodlabels1)
jacardkmediod1 = jaccard_similarity_score(test_labels, kmediodlabels1)
print('randkmediod1', randkmediod1)
print('jacardkmediod1', jacardkmediod1)
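# Hedged sketch (not the author's code): the Libras and Glass blocks above repeat the same
# scoring steps, so one way to cut the duplication is a small helper that scores a single
# labelling against the ground truth. It reuses newmeme.rand_index_score and
# jaccard_similarity_score exactly as they are called above; the name
# evaluate_clustering_sketch is hypothetical.
def evaluate_clustering_sketch(true_labels, predicted_labels, method_name):
    rand_val = newmeme.rand_index_score(true_labels, predicted_labels)
    jaccard_val = jaccard_similarity_score(true_labels, predicted_labels)
    print(method_name, 'rand', rand_val)
    print(method_name, 'jaccard', jaccard_val)
    return rand_val, jaccard_val

# Example usage with the k-means baseline computed above:
# evaluate_clustering_sketch(test_labels, kmeans1.labels_, 'kmeans')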
    # Tail of the pair-counting Rand index: tp_plus_fp (pairs sharing a predicted cluster)
    # is computed earlier in the function, before this excerpt.
    tp_plus_fn = comb(np.bincount(classes), 2).sum()
    A = np.c_[(clusters, classes)]
    tp = sum(
        comb(np.bincount(A[A[:, 0] == i, 1]), 2).sum() for i in set(clusters))
    fp = tp_plus_fp - tp
    fn = tp_plus_fn - tp
    tn = comb(len(A), 2) - tp - fp - fn
    return (tp + tn) / (tp + fp + fn + tn)


if __name__ == '__main__':
    # Sonar dataset: 208 samples, the first 97 labelled 1, the rest 0.
    rand.uniform(0.0001, 0.0010)  # was rand.random(0.0001, 0.0010), which raises TypeError; the value is unused
    result = readCsvFile()
    result = result.transpose()

    # k-medoids on the Euclidean distance matrix, 2 clusters.
    D = pairwise_distances(result, metric='euclidean')
    M, C = kMedoids(D, 2)
    kmediodlabels = convertLabelsToList(M, C)
    print("labels k mediod", kmediodlabels)

    # k-means baseline, 2 clusters.
    kmeans = KMeans(n_clusters=2, random_state=0).fit(result)
    print(kmeans.labels_)

    test_labels = np.zeros(208, int)
    test_labels[0:97] = 1  # np.int was removed from recent NumPy releases
    print("kmeans", confusion_matrix(test_labels, kmeans.labels_))

    randkmean = rand_index_score(test_labels, kmeans.labels_)
    jacardkmean = jaccard_similarity_score(test_labels, kmeans.labels_)
    siltkmean = silhouette_score(D, kmeans.labels_, metric="precomputed")
    print("kmeans rand", randkmean)
    print("jacardkmean", jacardkmean)
    print("silthkmean", siltkmean)

    entrophygen = Entrophy.computeEntophy(test_labels, kmeans.labels_)
    print('entrophy', entrophygen)
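# Hedged sketch (reconstruction, not verbatim from the source): the fragment above is only the
# tail of the pair-counting Rand index; the function header and the tp_plus_fp term over the
# predicted clustering fall before this excerpt. A self-contained version of the standard
# formulation, RI = (TP + TN) / (TP + FP + FN + TN), would look roughly like this. The name
# rand_index_score_sketch is hypothetical, and the imports are repeated so the sketch stands
# on its own.
import numpy as np
from scipy.special import comb

def rand_index_score_sketch(clusters, classes):
    clusters = np.asarray(clusters, dtype=int)
    classes = np.asarray(classes, dtype=int)
    # pairs sharing a predicted cluster (TP + FP) and pairs sharing a true class (TP + FN)
    tp_plus_fp = comb(np.bincount(clusters), 2).sum()
    tp_plus_fn = comb(np.bincount(classes), 2).sum()
    A = np.c_[(clusters, classes)]
    # TP: pairs that agree on both the predicted cluster and the true class
    tp = sum(comb(np.bincount(A[A[:, 0] == i, 1]), 2).sum() for i in set(clusters))
    fp = tp_plus_fp - tp
    fn = tp_plus_fn - tp
    tn = comb(len(A), 2) - tp - fp - fn
    return (tp + tn) / (tp + fp + fn + tn)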