cluster.py
#!/usr/bin/env python3
import sys

import numpy
import scipy.cluster.hierarchy as hcluster
import scipy.stats

import metrics_suite
from util import load, is_measure

# Cophenetic threshold and distance metric for hierarchical clustering,
# and the p-value cutoff for the Wilcoxon significance test.
thresh = 1.1
metricname = 'cityblock'
pthresh = 0.01
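
# Assumed input shape (whatever the evaluation pickles read by util.load()
# contain): `scores` is a list of per-benchmark results, each a list of
# (l, (worst, best, avg)) tuples aligned with `metricnames`. collect()
# folds each average into `res` (raw values per measure) and `cumulative`
# (count and running sum of the length-normalised values per measure).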
def collect(metricnames, scores, res, cumulative):
    for s in scores:
        for (m, (l, (sworst, sbest, savg))) in zip(metricnames, s):
            if l <= 0:
                continue
            x = float(savg)
            if x < 0:
                continue
            normalised = x / l
            if m not in res:
                res[m] = [x]
                cumulative[m] = (1, normalised)
            else:
                res[m].append(x)
                (n, cum) = cumulative[m]
                cumulative[m] = (n + 1, cum + normalised)
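
# load_evaluations() reads each evaluation file with util.load() and
# accumulates its scores into the shared tallies.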
def load_evaluations(evalfs, metricnames):
    res = {}
    cumulative = {}
    for fname in evalfs:
        try:
            scores = load(fname)
        except Exception:
            # Skip evaluation files that are missing or unreadable.
            continue
        collect(metricnames, scores, res, cumulative)
    return (res, cumulative)
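
# cluster() groups measures whose raw score vectors behave alike, using
# SciPy's hierarchical clustering with the cityblock (L1) metric and the
# default 'inconsistent' flattening criterion at threshold `thresh`.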
def cluster(res):
    # Keys and values come from the same dict, which is not modified in
    # between, so their iteration orders line up.
    names = list(res.keys())
    data = numpy.asarray(list(res.values()))
    clusters = hcluster.fclusterdata(data, thresh, metric=metricname)
    clustered = {}
    for (m, c) in zip(names, clusters):
        clustered.setdefault(c, []).append(m)
    return list(clustered.values())
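
# print_clusters() reports each cluster, listing every member with its
# mean normalised score as a percentage.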
def print_clusters(clusters, cumulative):
    for (i, c) in enumerate(clusters, 1):
        print("Cluster %d:" % i)
        for m in c:
            (n, cum) = cumulative[m]
            score = (cum / n) * 100.0
            print("%s: %.02f" % (m, score))
        print("")
def compare(m1, m2):
    (T, p) = scipy.stats.wilcoxon(m1, m2)
    if p <= pthresh:
        if sum(m1) > sum(m2):
            return p
        else:
            return -p
    return 0

def mean(xs):
    return sum(xs) / len(xs)
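
# find_better() compares every measure against the baseline measure `m`
# and buckets it as significantly different (in either direction) or
# statistically indistinguishable; each entry carries the measure's mean
# raw score (as a percentage) so the report can sort by it.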
def find_better(evals, m):
    baseline = evals[m]
    better = []
    worse = []
    same = []
    for (k, vs) in evals.items():
        if not is_measure(k):
            continue
        diff = compare(baseline, vs)
        x = mean(vs) * 100
        if diff == 0:
            same.append((x, k))
        elif diff > 0:
            better.append((x, k))
        else:
            worse.append((x, k))
    return (better, same, worse)
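
# print_better() prints the three buckets. Within each bucket, measures
# are ordered by their mean raw score, while the figure shown is the mean
# normalised score from `cumulative`, as a percentage.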
def print_better(m, cumulative, better, same, worse):
    def print_group(label, group):
        print("%s %s (%d):" % (label, m, len(group)))
        for (x, n) in sorted(group):
            (k, z) = cumulative[n]
            score = (z / k) * 100
            print("%s %.02f%%" % (n, score))

    print_group("BETTER than", better)
    print("")
    print_group("WORSE than", worse)
    print("")
    print_group("THE SAME as", same)
def split_hypothesis(evalfs, m):
    metricnames = list(metrics_suite.suite.keys())
    (evals, cumulative) = load_evaluations(evalfs, metricnames)
    return (cumulative, find_better(evals, m))

if __name__ == '__main__':
    evalfs = sys.argv[1:]
    (cumulative, (better, same, worse)) = split_hypothesis(evalfs, "Rand")
    print_better("Rand", cumulative, better, same, worse)