Example #1
import torch


def log_callback_kmeans_conductance(embeddings, adjacency_matrix, n_centroid):
    # Cluster the hyperbolic embeddings with Poincare k-means.
    kmeans = pkm.PoincareKMeans(n_centroid)
    kmeans.fit(embeddings)
    cluster_ids = kmeans.predict(embeddings)
    # Build a one-hot (n_points, n_centroid) cluster-assignment matrix.
    rows = torch.arange(0, cluster_ids.size(0), device=cluster_ids.device)
    prediction = torch.zeros(embeddings.size(0), n_centroid)
    prediction[rows, cluster_ids] = 1
    # Report the mean conductance of the clustering on the graph.
    return {
        "conductance": evaluation.mean_conductance(prediction, adjacency_matrix)
    }
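
evaluation.mean_conductance and pkm.PoincareKMeans are project modules that are not shown in these examples. For reference, the sketch below shows what a mean-conductance metric typically computes, assuming the standard definition phi(S) = cut(S, V\S) / min(vol(S), vol(V\S)) averaged over clusters; the helper name and the exact formula are assumptions, not the project's actual implementation.

import torch

def mean_conductance_sketch(assignment, adjacency):
    # assignment: (n, k) one-hot cluster-assignment matrix.
    # adjacency:  (n, n) symmetric (possibly weighted) adjacency matrix.
    A = adjacency.float()
    P = assignment.float()
    degrees = A.sum(dim=1)                    # node degrees
    total_vol = degrees.sum()                 # total volume of the graph
    vol = P.t() @ degrees                     # volume of each cluster, shape (k,)
    intra = ((P.t() @ A) * P.t()).sum(dim=1)  # edge weight kept inside each cluster
    cut = vol - intra                         # edge weight leaving each cluster
    denom = torch.minimum(vol, total_vol - vol).clamp(min=1e-12)
    return (cut / denom).mean()

With the matrices from Example #1, this would be called as mean_conductance_sketch(prediction, adjacency_matrix).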
Example #2

conductances = []
adjacency_matrix = X  # X holds the graph adjacency matrix

for i in tqdm.trange(args.n):
    # Run Euclidean (scikit-learn) k-means with 5 clusters on the representations.
    algs = ekm.sklearnKMeans(5)
    algs.fit(representations)
    prediction = algs.predict(representations).long()
    # Build the one-hot (n_nodes, 5) cluster-assignment matrix.
    prediction_mat = torch.LongTensor(
        [[1 if (y == prediction[j]) else 0 for y in range(5)]
         for j in range(len(X))])
    conductances.append(
        evaluation.mean_conductance(prediction_mat, adjacency_matrix))

C = torch.Tensor(conductances)
print("Maximum conductance -> ", C.max().item())
print("Mean conductance -> ", C.mean().item())
print("Std conductance -> ", C.std().item())
log_in.append({
    "evaluation_unsupervised_come_cond": {
        "unsupervised_conductence": C.tolist()
    }
})
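
The nested list comprehension above builds the one-hot assignment matrix entry by entry. Assuming prediction is a 1-D LongTensor of cluster indices (as returned by algs.predict(representations).long()), the same matrix can be built in a single call; this alternative is a sketch, not part of the original snippet:

import torch.nn.functional as F

# One-hot (n_nodes, 5) cluster-assignment matrix, equivalent to the nested comprehension.
prediction_mat = F.one_hot(prediction, num_classes=5)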

nmi = []
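# Multi-label ground truth: entry [i, y] is 1 if community y appears among node i's labels Y[i].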
ground_truth = torch.LongTensor([[1 if (y in Y[i]) else 0 for y in range(5)]
                                 for i in range(len(X))])
for i in tqdm.trange(args.n):