Example #1
            representations = torch.Tensor(V)

            #### unsupervised evaluation ####

            # fitting GMM
            algs = GaussianMixtureSKLearn(n_communities)
            algs.fit(representations)

            #get and transform prediction
            prediction = algs.predict(representations).long()
            prediction_mat = torch.LongTensor([[
                1 if (y == prediction[i]) else 0 for y in range(n_communities)
            ] for i in range(len(X))])

            # get the conductance
            conductance = evaluation.conductance(prediction_mat, X)

            # get the NMI
            ground_truth = torch.LongTensor(
                [[1 if (y in Y[i]) else 0 for y in range(n_communities)]
                 for i in range(len(X))])
            nmi = evaluation.nmi(prediction_mat, ground_truth)

            # get the accuracy
            accuracy = evaluation.precision1_unsupervised_evaluation(
                prediction, D.Y, n_communities)

            ## fill results
            results_unsupervised = {
                "accuracy": accuracy,
                "conductance": conductance,
                "nmi": nmi,
            }
Example #2
     for i in range(len(X))])
if args.verbose:
    print("\nEvaluate accuracy, conductence and normalised mutual information")
for i in range(args.n):
    current_gmm = gmm_list[i]
    accuracies.append(
        evaluation.poincare_unsupervised_em(representations,
                                            D.Y,
                                            n_gaussian,
                                            em=current_gmm,
                                            verbose=False))
    prediction = current_gmm.predict(representations)
    prediction_mat = torch.LongTensor(
        [[1 if (y == prediction[i]) else 0 for y in range(n_gaussian)]
         for i in range(len(X))])
    conductances.append(evaluation.conductance(prediction_mat, adjency_matrix))
    nmis.append(evaluation.nmi(prediction_mat, ground_truth))

# log results
log_in.append({
    log_prefix + "unsupervised_eval": {
        "accuracy": accuracies,
        "conductance": conductances,
        "nmi": nmis
    }
})

import numpy as np
# print results
print("Results of unsupervised evaluation ")
print("\t Mean accuracy : ", sum(accuracies, 0.) / args.n)
Example #3
                        prediction,
                        D.Y,
                        n_gaussian,
                    ))
                prediction_mat = torch.zeros(len(X), n_gaussian).cuda()
                gt_mat = [
                    torch.LongTensor(ground_truth[i]).cuda()
                    for i in tqdm.trange(len(D.Y))
                ]
                print("create prediction matrix")
                for i in tqdm.trange(len(D.Y)):
                    prediction_mat[i][prediction[i]] = 1
                # prediction_mat = torch.LongTensor([[ 1 if(y == prediction[i]) else 0 for y in range(n_gaussian)] for i in range(len(X))])
                print("Evaluate Conductance")
                conductances.append(
                    evaluation.conductance(prediction_mat, adjency_matrix))
                nmis.append(evaluation.nmi(prediction_mat, ground_truth))

            # log results
            log_in.append({
                log_prefix + "unsupervised_eval_k-means": {
                    "accuracy": accuracies,
                    "conductance": conductances,
                    "nmi": nmis
                }
            })

            import numpy as np
            # print results
            print("Results of unsupervised evaluation for ", directory)
            print("\t Mean accuracy : ", sum(accuracies, 0.) / args.n)
Example #4
import torch

from rcome.evaluation_tools.evaluation import nmi, conductance
from rcome.data_tools import data_loader
from rcome.clustering_tools import poincare_em

# loading data and learned embeddings
X, Y = data_loader.load_corpus("LFR1", directed=False)
n_gaussian = 13
representations = torch.load("LOG/all_loss/representation.pth")
ground_truth = torch.LongTensor([[1 if(y in Y[i]) else 0 for y in range(n_gaussian)] for i in range(len(X))])

# estimate the gmm
em_alg = poincare_em.PoincareEM(n_gaussian)
em_alg.fit(representations)

# predict the Gaussian component associated with each node
prediction = em_alg.predict(representations)
prediction_mat = torch.LongTensor([[1 if(y == prediction[i]) else 0 for y in range(n_gaussian)] for i in range(len(X))])


conductance_scores = torch.Tensor(conductance(prediction_mat, X))
print("Conductance score is ", conductance_scores.mean().item(), "+-",
      conductance_scores.std().item())
nmi_score = nmi(prediction_mat, ground_truth)
print("NMI score is ", nmi_score)