Code Example #1
def evalComparativaAccuracy(data, clusterDict, labels, possibilities):
    # Map each gold verb to the id of the automatic cluster it was assigned to.
    predicted_labels = []
    for verbG in labels:  # gold
        for claseAuto, verbs in clusterDict.items():
            if verbG in verbs:
                predicted_labels.append(claseAuto)
                break

    scores = numpy.array([0.0])
    for p in possibilities:
        # Re-encode each candidate gold labelling as consecutive integer ids.
        true_labels = []
        setPos = set(p)
        mapped = {pos: i for i, pos in enumerate(setPos)}
        for pos in p:
            true_labels.append(mapped[pos])

        ac = accuracy(true_labels, predicted_labels)
        contingency_matrix = metrics.cluster.contingency_matrix(true_labels, predicted_labels)
        purity = numpy.sum(numpy.amax(contingency_matrix, axis=0)) / float(numpy.sum(contingency_matrix))

        # Harmonic mean (F-measure style) of accuracy and purity for this labelling.
        fm = (2 * ac * purity) / float(ac + purity)
        scores += fm

    return {data: scores / len(possibilities)}
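A minimal driving sketch for the function above, with invented toy data: the clusterDict, labels and possibilities values are illustrative only, and the imports simply supply the names the snippet relies on (numpy, sklearn's metrics and coclust's accuracy).

import numpy
from sklearn import metrics
from coclust.evaluation.external import accuracy

clusterDict = {0: ["run", "walk"], 1: ["eat", "drink"]}     # automatic clusters (toy)
labels = ["run", "walk", "eat", "drink"]                    # gold verbs, in a fixed order
possibilities = [["motion", "motion", "ingest", "ingest"]]  # one candidate gold labelling

print(evalComparativaAccuracy("toy", clusterDict, labels, possibilities))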
Code Example #2
    def _KmeansAcc(self):
        # Cluster the autoencoder codes of a random validation subsample with k-means.
        kmeans = KMeans(n_clusters=self.classes, n_init=10)  # n_jobs was removed from KMeans in scikit-learn 1.0
        AE = self.model.layers[2]
        rindex = np.random.randint(0, self.y_val.shape[0], size=1000)
        outs = AE.predict(self.x_val[rindex])
        y_pred = kmeans.fit_predict(outs)
        # Clustering accuracy (coclust), NMI and ARI against the true validation labels.
        acc = accuracy(self.y_val[rindex], y_pred)
        nmi = normalized_mutual_info_score(self.y_val[rindex], y_pred)
        ari = adjusted_rand_score(self.y_val[rindex], y_pred)
        return (acc, nmi, ari)
Code Example #3
def execute_algo(model, model_name, X, y, verbose=True):
    print("##############\n# {}\n##############".format(model_name))
    model.fit(X)
    res_nmi = nmi(model.row_labels_, y)
    res_ari = ari(model.row_labels_, y)
    res_acc = accuracy(model.row_labels_, y)
    if verbose:
        print("NMI =", res_nmi)
        print("ARI =", res_ari)
        print("ACC =", res_acc)
    return res_nmi, res_ari, res_acc
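A hedged usage sketch for execute_algo. It assumes nmi, ari and accuracy are the usual aliases (sklearn's normalized_mutual_info_score and adjusted_rand_score plus coclust's accuracy), and it picks coclust's CoclustMod as one example of a model exposing row_labels_ after fit; none of that is stated by the snippet itself.

import numpy as np
from sklearn.metrics import normalized_mutual_info_score as nmi
from sklearn.metrics import adjusted_rand_score as ari
from coclust.evaluation.external import accuracy
from coclust.coclustering import CoclustMod

X = np.random.rand(100, 20)             # toy data matrix (rows will be clustered)
y = np.random.randint(0, 3, size=100)   # toy ground-truth row labels
execute_algo(CoclustMod(n_clusters=3), "CoclustMod", X, y)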
Code Example #4
File: varios.py  Project: Romanici/TFG-Estadistica
def metrics(y, y_pred):

    import numpy as np
    from sklearn.metrics import normalized_mutual_info_score, adjusted_rand_score

    nmi = normalized_mutual_info_score(y, y_pred)
    ari = adjusted_rand_score(y, y_pred)

    from coclust.evaluation.external import accuracy
    acc = accuracy(y, y_pred)

    return acc, nmi, ari
Code Example #5
def eval_labels(x, y, labels):
    # acc = cluster_acc(y, labels)
    acc = external.accuracy(y, labels)
    nmi = metrics.adjusted_mutual_info_score(y,
                                             labels,
                                             average_method='geometric')
    adj = metrics.adjusted_rand_score(y, labels)
    sil = metrics.silhouette_score(x, labels)
    db = metrics.davies_bouldin_score(x, labels)
    fm = metrics.fowlkes_mallows_score(y, labels)
    cont_matrix = metrics.cluster.contingency_matrix(labels, y)
    print(cont_matrix)
    print("acc", acc)
    print('nmi', nmi)
    print('adj', adj)
    print('sil', sil)
    print('db', db)
    print('fm:', fm)
    return acc, nmi, adj, sil, db, fm
Code Example #6

plt.show()


# In[ ]:


#Kmeans results
print('\n\n++ Kmeans Results')
#print('Number of clusters: ', number_of_clusters)
#print('Number of iterations: ', n_init) # number of iterations to run with different centroid seeds
print('Elapsed time to cluster: %.4f s'% kmeans_elapsed_time)
print('F1score: ', metrics.f1_score(segarreshap, img_segmflat, average='weighted'))
print('Accuracy score: ', metrics.accuracy_score(segarreshap, img_segmflat)) #sklearn accuracy (requires identical label ids)
print('Accuracy score: ', accuracy(segarreshap, img_segmflat)) #coclust accuracy (best cluster-to-class mapping)
print('Homogeneity score: ', metrics.homogeneity_score(segarreshap,img_segmflat))
print('Rand score: ' , metrics.adjusted_rand_score(segarreshap,img_segmflat))
print('VMeasure: ' , metrics.v_measure_score(segarreshap,img_segmflat))
print('Normalized mutual info score: ', metrics.normalized_mutual_info_score(segarreshap,img_segmflat))


# In[ ]:


#DBscan
from sklearn.cluster import DBSCAN
dbtime=time.time()
db = DBSCAN(eps=10, min_samples=20,metric='euclidean')
dbf = db.fit(image_cols)
db_elapsed_time = time.time() - dbtime
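The example stops right after fitting DBSCAN; a hedged continuation that scores its labels with the same metrics as the k-means block above (reusing segarreshap, metrics and accuracy, which are assumed to still be in scope) could look like this.

db_labels = dbf.labels_   # cluster id per pixel; DBSCAN marks noise points as -1
print('\n\n++ DBSCAN Results')
print('Elapsed time to cluster: %.4f s' % db_elapsed_time)
print('Accuracy score: ', accuracy(segarreshap, db_labels))
print('Rand score: ', metrics.adjusted_rand_score(segarreshap, db_labels))
print('Normalized mutual info score: ', metrics.normalized_mutual_info_score(segarreshap, db_labels))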
Code Example #7
    def test_accuracy_1(self):
        true_row_labels = [0, 0, 1, 1]
        predicted_row_labels = [0, 0, 1, 1]
        accuracy = external.accuracy(true_row_labels, predicted_row_labels)

        self.assertEqual(accuracy, 1)
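A companion test (name invented) that would document the key property of external.accuracy, namely that it matches predicted cluster ids to true classes before scoring, so a relabelled but otherwise identical partition still scores 1:

    def test_accuracy_permuted_labels(self):
        true_row_labels = [0, 0, 1, 1]
        predicted_row_labels = [1, 1, 0, 0]   # same partition, swapped cluster ids
        accuracy = external.accuracy(true_row_labels, predicted_row_labels)

        self.assertEqual(accuracy, 1)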
Code Example #8
def acc(y_true, y_pred):
    from coclust.evaluation.external import accuracy
    acc = accuracy(y_true, y_pred)

    return acc
Code Example #9
for l1, l2 in zip(encode.layers[:19], autoencoder.layers[0:19]):
    l1.set_weights(l2.get_weights())

pred_encode = encode.predict(X_test)

kmeans_clustering = KMeans(n_clusters=10)

pred_encode = pred_encode.reshape(-1, 7 * 7 * 7)

clustered_training_set = kmeans_clustering.fit_predict(pred_encode)

#!pip install coclust

from coclust.evaluation.external import accuracy

accuracy(y_test, clustered_training_set)

#confusion matrix

print(confusion_matrix(y_test, clustered_training_set))

# Note: sklearn.utils.linear_assignment_ was removed in scikit-learn 0.23+;
# scipy.optimize.linear_sum_assignment is the usual replacement.
from sklearn.utils.linear_assignment_ import linear_assignment
import numpy as np


def _make_cost_m(cm):
    # Turn the confusion matrix into a cost matrix: the assignment solver
    # minimizes cost, so maximizing matched counts means negating them.
    s = np.max(cm)
    return (-cm + s)


indexes = linear_assignment(
Code Example #10
kmeans.fit(x)
print('SSE for 10 clusters is', kmeans.inertia_)

#test_loss, test_acc = kmeans.evaluate(test_images,  test_labels, verbose=2)
predictions = kmeans.predict(X_test)  # k-means predict already returns hard cluster labels, so no argmax is needed

print(predictions)

print(y_test)

print(metrics.adjusted_rand_score(y_test, predictions))

print(nmi(y_test, predictions))

print(accuracy(y, kmeans.labels_))

# In[2]:

from sklearn.metrics import confusion_matrix
print(confusion_matrix(y, kmeans.labels_))

from sklearn.utils.linear_assignment_ import linear_assignment
import numpy as np


def _make_cost_m(cm):
    s = np.max(cm)
    return (-cm + s)
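Both of the truncated snippets above stop just short of the assignment step. A self-contained sketch of the full computation with scipy's linear_sum_assignment (the maintained replacement for sklearn.utils.linear_assignment_) is shown below; the helper name and toy labels are illustrative, not taken from the snippets.

import numpy as np
from scipy.optimize import linear_sum_assignment
from sklearn.metrics import confusion_matrix


def cluster_accuracy(y_true, y_pred):
    cm = confusion_matrix(y_true, y_pred)
    # Hungarian algorithm on the negated counts = mapping that maximizes matches.
    row_ind, col_ind = linear_sum_assignment(-cm)
    return cm[row_ind, col_ind].sum() / cm.sum()


print(cluster_accuracy([0, 0, 1, 1, 2, 2], [1, 1, 0, 0, 2, 2]))  # 1.0: clusters are only renamed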

Code Example #11
def clustering_accuracy(labels, predicted_labels, offset=40):
    # Shift the true labels down by `offset` so they start at 0 before scoring.
    labels = labels - offset
    acc = accuracy(labels, predicted_labels)
    return acc