#!/usr/bin/env python3
"""
"""
import getopt
import os
import pickle
import sys

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib.backends.backend_pdf import PdfPages
from sklearn.cluster import DBSCAN
from sklearn import mixture
from sklearn.preprocessing import StandardScaler

from elviz_utils import read_pickle_or_CSVs
MIN_ROWS = 20
EPS = .3
MIN_SAMPLES = 4
MAX_AVG_FOLD = 500  # values over 20k occur; 500 is roughly 2x typical
DATA_PICKLE = 'data/data.pkl'  # filename of previously parsed data
RAW_DATA_DIR = './raw_data/'   # location of CSV files
RESULTS_DIR = './results/'     # location of results output
HEURISTIC_SAMPLE_SIZE = 10
HEURISTIC_PDF = 'heuristic.pdf'
HEURISTIC_PICKLE = 'data/heuristic.pkl'
DPGMM_N_COMPONENTS = 10
CLUSTER_COLUMNS = ['Average fold', 'Reference GC']
CONTIG_COLUMN = 'IMG scaffold_oid'

sns.set(style="whitegrid")
def dbscan_heuristic(elviz_data, scaler):
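    """Compute and plot the sorted k-dist curve used to choose EPS.

    For a random sample of rows in each sufficiently large taxon group,
    computes the distance to the MIN_SAMPLES-th nearest neighbor over the
    normalized data and plots the largest values; the knee of this curve is
    the classic DBSCAN heuristic for picking epsilon.
    """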
if os.path.isfile(HEURISTIC_PICKLE):
print("reading %s for previously computed heuristic data" % HEURISTIC_PICKLE)
with open(HEURISTIC_PICKLE, 'rb') as file:
distances = pickle.load(file)
else:
print("processing full data set to compute heuristic for epsilon / N")
distances = []
for filename in elviz_data.keys():
# progress monitor
print(".", end="")
sys.stdout.flush()
df = elviz_data[filename]
dfgb = df.groupby(['Kingdom', 'Phylum', 'Class', 'Order', 'Family', 'Genus'])
for key in dfgb.indices.keys():
idx = dfgb.indices[key]
tax_rows = df.iloc[idx]
                if len(tax_rows) < HEURISTIC_SAMPLE_SIZE:
                    continue
                # select the columns used for clustering, normalized by the
                # scaler fit on the combined data
                reduced_df = pd.DataFrame(scaler.transform(df[CLUSTER_COLUMNS]),
                                          columns=CLUSTER_COLUMNS)
                # create a random sample of the rows
                random_sample = reduced_df.sample(HEURISTIC_SAMPLE_SIZE)
                # pack the two columns into complex numbers (a + bi) so that
                # abs() of a difference gives the Euclidean distance
                p1 = (random_sample[CLUSTER_COLUMNS[0]] +
                      1j * random_sample[CLUSTER_COLUMNS[1]]).values
                p2 = (reduced_df[CLUSTER_COLUMNS[0]] +
                      1j * reduced_df[CLUSTER_COLUMNS[1]]).values
                # calculate all distances between each sampled point and all
                # data points (array-broadcasting trick)
                all_dists = abs(p1[..., np.newaxis] - p2)
                # sort along each row
                all_dists.sort(axis=1)
                # keep the k-th nearest-neighbor distance for each sampled
                # point; tolist() copies the data, so the pickle below does
                # not end up storing a view of the whole distance matrix
                distances.append(all_dists[:, MIN_SAMPLES].tolist())
with open(HEURISTIC_PICKLE, 'wb') as file:
pickle.dump(distances, file, pickle.HIGHEST_PROTOCOL)
distances = [item for sublist in distances for item in sublist]
with PdfPages(RESULTS_DIR + HEURISTIC_PDF) as pdf:
print("\ncomputing histogram and making figure")
# plt.hist(distances, bins=50, range=(0, 2))
distances.sort(reverse=True)
plt.plot(distances[0:100])
plt.title("THIS IS A PLOT TITLE, YOU BET")
plt.xlabel("Sorted index")
plt.ylabel("k-dist (k = %d)" % MIN_SAMPLES)
pdf.savefig()
plt.close()
def dump_clusters(filename, key, labels, contigs):
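    """Write one file of contig IDs per cluster, skipping noise (label -1)."""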
name = list(filter(None, key))[-1]
cluster = 1
unique_labels = set(labels)
for k in unique_labels:
if k == -1:
continue
        dat_filename = filename.replace("csv", "%s-%d.dat" % (name, cluster))
contig_list = contigs[labels == k]
cluster += 1
with open(RESULTS_DIR + dat_filename, 'w') as file:
for item in contig_list:
file.write("{}\n".format(item))
print("%s - %d valid clusters" % (name, cluster))
def plot_clusters(pdf, df, title, labels, core_samples_mask, limits):
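    """Plot one taxon's clusters: core samples as large markers, points
    joined by extension as small markers, and noise in black."""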
    # noise points (label -1) are drawn in black; clusters get colors
    # from the Spectral colormap
    unique_labels = set(labels)
    colors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels)), alpha=0.6)
    for k, col in zip(unique_labels, colors):
        if k == -1:
            col = [0, 0, 0, .6]
class_member_mask = (labels == k)
# plot the core samples
xy = df[class_member_mask & core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
# plot those joined by extension
xy = df[class_member_mask & ~core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=3)
# format the plot
plt.title(title)
plt.xlabel(CLUSTER_COLUMNS[0], fontsize=10)
plt.ylabel(CLUSTER_COLUMNS[1], fontsize=10)
plt.xlim(limits["x"])
plt.ylim(limits["y"])
pdf.savefig()
plt.close()
def main(argv):
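    """Parse command-line options, then cluster each Elviz file per taxon."""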
dbscan_heuristic_mode = False
dpgmm_mode = False
do_plot_clusters = False
do_dump_clusters = False
try:
opts, args = getopt.getopt(argv,"hegdp")
except getopt.GetoptError:
print('elviz_cluster.py [-h] [-e] [-g] [-d] [-p]')
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
            print('elviz_cluster.py [-h] [-e] [-g] [-d] [-p]')
            print(' -h = show this help')
            print(' -e = run DBSCAN epsilon heuristic plot generation')
            print(' -g = use a DPGMM for clustering')
            print(' -p = plot the clusters to a PDF file')
            print(' -d = dump the clusters to a text file')
sys.exit()
elif opt == '-e':
dbscan_heuristic_mode = True
elif opt == '-g':
dpgmm_mode = True
elif opt == '-p':
do_plot_clusters = True
elif opt == '-d':
do_dump_clusters = True
[elviz_data, combined_df] = read_pickle_or_CSVs(DATA_PICKLE, RAW_DATA_DIR)
# Setup plotting limits
print("determining plotting limits")
limits = {"x": [combined_df['Average fold'].min(), MAX_AVG_FOLD],
"y": [combined_df['Reference GC'].min(), combined_df['Reference GC'].max()]}
# Below changed in favor of fixed MAX
# limits["x"] = [combined_df['Average fold'].min(), combined_df['Average fold'].max()]
# fixed MAX below
print("normalizing data prior to clustering")
# normalize the combined data to retrieve the normalization parameters
scaler = StandardScaler().fit(combined_df[CLUSTER_COLUMNS])
    if dbscan_heuristic_mode:
        print("making DBSCAN heuristic plots")
        dbscan_heuristic(elviz_data, scaler)
        sys.exit()
print("serially processing files")
for filename in elviz_data.keys():
pdf_filename = filename.replace("csv", "pdf")
# skip if the PDF already exists
if os.path.isfile(RESULTS_DIR + pdf_filename):
print("skiping file %s" % filename)
continue
print("processing file %s" % filename)
df = elviz_data[filename]
# create a multipage PDF for storing the plots
with PdfPages(RESULTS_DIR + pdf_filename) as pdf:
# find unique values of taxonomy columns
dfgb = df.groupby(['Kingdom', 'Phylum', 'Class', 'Order', 'Family', 'Genus', 'Species'])
for key in dfgb.indices.keys():
idx = dfgb.indices[key]
tax_rows = df.iloc[idx]
if len(tax_rows) < MIN_ROWS:
continue
                # normalize the columns used for clustering (coverage, GC),
                # reusing the scaler fit on the combined data
tax_rows_cluster_columns = scaler.transform(tax_rows[CLUSTER_COLUMNS])
if not dpgmm_mode:
db = DBSCAN(eps=EPS, min_samples=MIN_SAMPLES)
db.fit(tax_rows_cluster_columns)
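                    # mark DBSCAN core samples so they can be drawn larger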
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
                else:
                    # BayesianGaussianMixture with a Dirichlet-process prior;
                    # this replaces the DPGMM class removed from recent
                    # scikit-learn (alpha -> weight_concentration_prior)
                    db = mixture.BayesianGaussianMixture(
                        n_components=DPGMM_N_COMPONENTS, max_iter=100,
                        covariance_type='full',
                        weight_concentration_prior_type='dirichlet_process',
                        weight_concentration_prior=100)
                    db.fit(tax_rows_cluster_columns)
                    labels = db.predict(tax_rows_cluster_columns)
                    # mixture models have no core-sample notion; treat every
                    # point as a core sample for plotting
                    core_samples_mask = np.ones_like(labels, dtype=bool)
                # number of clusters in labels, ignoring noise if present
                n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
                if n_clusters_ < 1:
                    continue
title = ', '.join(key)
                if do_plot_clusters:
                    plot_clusters(pdf,
                                  scaler.inverse_transform(tax_rows_cluster_columns),
                                  title, labels, core_samples_mask, limits)
                if do_dump_clusters:
                    dump_clusters(filename, key, labels, tax_rows[CONTIG_COLUMN])
if __name__ == "__main__":
main(sys.argv[1:])