"""
Find the number of agglomerated clusters using the Calinski-Harabasz index.
Apply hierarchical (agglomerative) clustering,
using squared error and average linkage.
This follows the protocol of Tibshirani et al. in example 4.1
of 'Estimating the number of clusters in a data set via the gap statistic'.
"""
from StringIO import StringIO
import os
import time
import numpy as np
from SnippetUtil import HandlingError
import Form
import FormOut
import Util
import Carbone
import agglom
import kmeans
import const
import RUtil
g_tags = ['pca:compute']
g_default = const.read('20100709a')
def get_form():
    """
    @return: the body of a form
    """
    form_objects = [
            Form.MultiLine('table', 'R table', g_default),
            Form.Sequence('axes', 'column labels of Euclidean axes',
                ('pc1', 'pc2', 'pc3')),
            Form.CheckGroup('options', 'more options', [
                Form.CheckItem('verbose',
                    'show calinski index values', True)])]
    return form_objects
def get_form_out():
    """
    @return: the format of the output
    """
    return FormOut.Report('report')
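# The snippet below scores each agglomeration step with the
# Calinski-Harabasz index
#     CH(k) = (BGSS / (k - 1)) / (WGSS / (n - k))
# where n is the number of points, k is the number of clusters, and
# BGSS and WGSS are the between- and within-group sums of squares.
# Larger CH values indicate a better clustering, and the reported
# best k is the cluster count that maximizes CH.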
def get_response_content(fs):
    # read the table
    rtable = RUtil.RTable(fs.table.splitlines())
    header_row = rtable.headers
    data_rows = rtable.data
    Carbone.validate_headers(header_row)
    # get the numpy array of conformant points
    h_to_i = dict((h, i+1) for i, h in enumerate(header_row))
    axis_headers = fs.axes
    if not axis_headers:
        raise ValueError('no Euclidean axes were provided')
    axis_set = set(axis_headers)
    header_set = set(header_row)
    bad_axes = axis_set - header_set
    if bad_axes:
        raise ValueError('invalid axes: ' + ', '.join(bad_axes))
    axis_lists = []
    for h in axis_headers:
        index = h_to_i[h]
        try:
            axis_list = Carbone.get_numeric_column(data_rows, index)
        except Carbone.NumericError:
            raise ValueError(
                    'expected the axis column %s '
                    'to be numeric' % h)
        axis_lists.append(axis_list)
    points = np.array(zip(*axis_lists))
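    # points now has shape (n_points, n_axes); zip(*axis_lists)
    # transposes the per-axis columns into per-point coordinate tuples.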
    # do the clustering while computing the calinski index at each merge
    cluster_counts = []
    wgss_values = []
    neg_calinskis = []
    allmeandist = kmeans.get_allmeandist(points)
    cluster_map = agglom.get_initial_cluster_map(points)
    w_ssd_map = agglom.get_initial_w_ssd_map(points)
    b_ssd_map = agglom.get_initial_b_ssd_map(points)
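    # Interpretive note (an assumption; agglom's internals are not shown
    # here): w_ssd_map[i] appears to hold the sum of squared distances
    # over unordered point pairs within cluster i, so dividing by the
    # cluster size in the loop below recovers the within-cluster sum of
    # squares about the centroid, via the identity
    #     sum_{a<b} ||x_a - x_b||^2 = m * sum_a ||x_a - xbar||^2
    # for a cluster of m points.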
    q = agglom.get_initial_queue(b_ssd_map)
    while len(cluster_map) > 2:
        # do an agglomeration step
        pair = agglom.get_pair_fast(cluster_map, q)
        agglom.merge_fast(cluster_map, w_ssd_map, b_ssd_map, q, pair)
        # compute the within group sum of squares
        indices = cluster_map.keys()
        wgss = sum(w_ssd_map[i] / float(len(cluster_map[i])) for i in indices)
        # compute the between group sum of squares
        bgss = allmeandist - wgss
        # get the calinski index
        n = len(points)
        k = len(cluster_map)
        numerator = bgss / float(k - 1)
        denominator = wgss / float(n - k)
        calinski = numerator / denominator
        # append to the lists
        cluster_counts.append(k)
        wgss_values.append(wgss)
        neg_calinskis.append(-calinski)
    # Get the best cluster count according to the calinski index.
    # Do this trickery with negs so that it breaks ties
    # using the smallest number of clusters.
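    # For example, min([(-5.0, 4), (-5.0, 3)]) == (-5.0, 3), because
    # tuples compare lexicographically, so tied calinski values
    # resolve to the smaller cluster count.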
    neg_calinski, best_k = min(zip(neg_calinskis, cluster_counts))
    # create the response
    out = StringIO()
    print >> out, 'best cluster count: k = %d' % best_k
    if fs.verbose:
        print >> out
        print >> out, '(k, wgss, calinski):'
        for k, wgss, neg_calinski in zip(
                cluster_counts, wgss_values, neg_calinskis):
            row = (k, wgss, -neg_calinski)
            print >> out, '\t'.join(str(x) for x in row)
    # return the response
    return out.getvalue()
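

if __name__ == '__main__':
    # A minimal standalone sketch, added for illustration and not part
    # of the original snippet protocol: compute the Calinski-Harabasz
    # index directly from a hand-built two-cluster toy partition using
    # only numpy, to show the formula used in get_response_content.
    pts = np.array([
            [0.0, 0.0], [0.1, 0.0],
            [5.0, 5.0], [5.1, 5.0]])
    clusters = [[0, 1], [2, 3]]
    n, k = len(pts), len(clusters)
    grand_mean = pts.mean(axis=0)
    # within-group sum of squares about each cluster centroid
    wgss = sum(
            ((pts[idx] - pts[idx].mean(axis=0))**2).sum()
            for idx in clusters)
    # between-group sum of squares, weighted by cluster size
    bgss = sum(
            len(idx) * ((pts[idx].mean(axis=0) - grand_mean)**2).sum()
            for idx in clusters)
    calinski = (bgss / float(k - 1)) / (wgss / float(n - k))
    print 'toy calinski index: %f' % calinski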