-
Notifications
You must be signed in to change notification settings - Fork 0
/
bsrelSimCSVconvolve.py
353 lines (318 loc) · 12.8 KB
/
bsrelSimCSVconvolve.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
#! /usr/bin/env python
# Take a CSV file, fit file, etc for one taxa and convolve them as a single
# CSV file.
import csv
import os
import sys
import argparse
from bsrelSimParsers import ( recover_csv, recover_settings,
recover_csv_mg94)
#def append_fit(buffer, filename):
#def append_simulated(buffer, filename):
def meandnds(omegas, props):
    """Return the proportion-weighted mean dN/dS over the rate classes.

    omegas and props are parallel sequences of numbers (or numeric
    strings); the result is sum(omega_i * prop_i).
    """
    return sum(float(w) * float(p) for w, p in zip(omegas, props))
# take a column and transform into a column vector with the mean of the
# values in the old column, ignoring ignore_rows rows at the top, following
# leave_rows number of empty rows also at the top
def mean_column(column, ignore_rows=0, leave_rows=0):
    """Collapse *column* to [mean of column[ignore_rows:]], with
    leave_rows zero-padding entries in front."""
    tail = column[ignore_rows:]
    return [0] * leave_rows + [sum(tail) / len(tail)]
# take a column and transform it into a column vector with the max of the
# values in the old column, ignoring ignore_rows rows at the top, following
# leave_rows number of empty rows also at the top.
def max_column(column, ignore_rows=0, leave_rows=0):
    """Collapse *column* to [max of column[ignore_rows:]], with
    leave_rows zero-padding entries in front."""
    return [0] * leave_rows + [max(column[ignore_rows:])]
def sum_column(column, ignore_rows=0, leave_rows=0):
    """Collapse *column* to [sum of column[ignore_rows:]], with
    leave_rows zero-padding entries in front."""
    return [0] * leave_rows + [sum(column[ignore_rows:])]
def min_column(column, ignore_rows=0, leave_rows=0):
    """Collapse *column* to [min of column[ignore_rows:]], with
    leave_rows zero-padding entries in front."""
    return [0] * leave_rows + [min(column[ignore_rows:])]
def append_BSREL3(buffer, filename, whole_tree):
    """Append BSREL3 summary columns (branch length, mean dN/dS, and the
    omega>1 statistics) to the CSV lines in *buffer*; return the buffer.

    buffer     -- list of CSV lines; first line is the header, each later
                  line starts with the branch name.
    filename   -- path of the BSREL3 file parsed by recover_csv.
    whole_tree -- when True, each derived column is collapsed to a single
                  tree-level value (matching a flattened one-row buffer).
    """
    contents = recover_csv(filename)
    # Branch names in the order they already appear in the buffer, so the
    # appended columns line up row-for-row.
    branch_order = [line.split(",")[0] for line in buffer[1:]]
    try:
        len_column = rep_to_column(contents, "length", branch_order)
    except KeyError:
        # The recovered rep is missing one of the branches; skip this file.
        print("Broken BSREL3 file: ", filename)
        return buffer
    len_column[0] = "BSREL3_length"
    buffer = append_column(buffer, len_column)
    omegas_column = rep_to_column(contents, "omegas", branch_order)
    props_column = rep_to_column(contents, "props", branch_order)
    omegas_column[0] = "BSREL3_meandnds"
    props_column[0] = "BSREL3_propOverOne"
    # Proportion-weighted mean dN/dS per branch.
    mean_omegas_column = [meandnds(omegas, props)
                          for omegas, props in zip(omegas_column[1:],
                                                   props_column[1:])]
    if whole_tree:
        mean_omegas_column = mean_column(mean_omegas_column)
    # BUGFIX: this header previously read "BSREL3_meandns" (missing 'd').
    mean_omegas_column.insert(0, "BSREL3_meandnds")
    # Largest omega per branch, kept only when it exceeds one (else 0).
    # BUGFIX (perf): the max was previously computed twice per branch.
    omega_over_one_column = []
    for omegas in omegas_column[1:]:
        top = max(float(o) for o in omegas)
        omega_over_one_column.append(top if top > 1 else 0)
    orig_omega_over_one_column = omega_over_one_column
    if whole_tree:
        omega_over_one_column = max_column(omega_over_one_column)
    omega_over_one_column.insert(0, "BSREL3_OmegaOver1")
    # Proportion of sites in the last (largest-omega) rate class, when that
    # class is under positive selection.  BUGFIX: the last omega is now
    # converted with float() before the comparison, as done everywhere else
    # (the parsed values may be strings).
    prop_over_one_column = [props[-1] if float(omegas[-1]) > 1 else 0
                            for omegas, props in zip(omegas_column[1:],
                                                     props_column[1:])]
    if whole_tree:
        # Keep the prop that belongs to the branch carrying the tree-wide
        # maximum.  BUGFIX: the old code selected a bare scalar and then
        # crashed calling .insert() on it; wrap the selection in a list.
        prop_over_one_column = [prop_over_one_column[
            orig_omega_over_one_column.index(omega_over_one_column[-1])]]
    prop_over_one_column.insert(0, "BSREL3_propOverOne")
    # Omega of the last rate class, and its site proportion, per branch.
    max_omega_column = [omegas[-1] for omegas in omegas_column[1:]]
    orig_max_omega_column = max_omega_column
    if whole_tree:
        max_omega_column = max_column(max_omega_column)
    max_omega_column.insert(0, "BSREL3_MaxOmega")
    max_prop_column = [props[-1] for props in props_column[1:]]
    if whole_tree:
        # BUGFIX: the old code assigned this reduction to a misspelled name
        # ("max_omega_colum"), so it silently never took effect.
        max_prop_column = [max_prop_column[
            orig_max_omega_column.index(max_omega_column[-1])]]
    max_prop_column.insert(0, "BSREL3_MaxOmegaProp")
    buffer = append_column(buffer, omega_over_one_column)
    buffer = append_column(buffer, prop_over_one_column)
    buffer = append_column(buffer, mean_omegas_column)
    buffer = append_column(buffer, max_omega_column)
    buffer = append_column(buffer, max_prop_column)
    return buffer
def append_MG94(buffer, filename, whole_tree):
    """Append the MG94 branch-length and mean-dN/dS columns to the CSV
    lines in *buffer*; return the buffer.

    Returns the buffer unchanged (after a warning) when the MG94 file
    cannot be parsed.  whole_tree is accepted for interface symmetry with
    append_BSREL3 but is not used here.
    """
    # XXX may need to be configured to work with incomplete csv
    try:
        contents = recover_csv_mg94(filename)
    except IndexError:
        print("Broken MG94 file: ", filename)
        return buffer
    # Branch order as it already appears in the buffer.
    branch_order = [row.split(',')[0] for row in buffer[1:]]
    len_column = rep_to_column(contents, "length", branch_order)
    len_column[0] = "MG94_length"
    buffer = append_column(buffer, len_column)
    omegas_column = rep_to_column(contents, "omegas", branch_order)
    # MG94 has a single rate class; keep only its omega.  This step mangles
    # the header cell, which is overwritten on the next line.
    omegas_column = [entry[0][0] for entry in omegas_column]
    omegas_column[0] = "MG94_meandnds"
    buffer = append_column(buffer, omegas_column)
    return buffer
def get_columns(rows):
    """Transpose CSV-formatted lines into a list of columns.

    rows is a list of comma-separated strings; the first row determines
    the number of columns.  Returns a list of lists of string cells.
    """
    columns = [[] for _ in rows[0].split(',')]
    for row in rows:
        for idx, cell in enumerate(row.split(',')):
            columns[idx].append(cell)
    return columns
# Take list of strings (lines), return same
def flatten_csv(filename, contents):
    """Collapse a per-branch CSV (list of lines) into [header, summary row].

    The branch name in the first field of the summary row is replaced by
    *filename*.  Each remaining column is reduced with a column-appropriate
    statistic (max, min, sum or mean).  Returns None (after a warning) when
    the CSV holds no data rows.
    """
    header = contents[0]
    # the filename replaces the branch name as the first item in the row
    flat_contents = [filename]
    columns = get_columns(contents)
    if len(columns[0]) <= 1:
        print("empty csv: ", filename)
        return
    max_omega_over_one_index = -1
    for column in columns:
        name = column[0]
        if name == "Branch":
            # branch names carry no numeric information
            continue
        column_data = [float(cell) for cell in column[1:]]
        if name in ("RateClasses", "OmegaOver1"):
            peak = max_column(column_data)[0]
            flat_contents.append(peak)
            if name == "OmegaOver1":
                # remember which branch held the largest omega so the
                # matching weight can be reported below
                max_omega_over_one_index = column_data.index(peak)
        elif name == "WtOmegaOver1":
            if max_omega_over_one_index == -1:
                flat_contents.append(0)
            else:
                flat_contents.append(
                    float(column_data[max_omega_over_one_index]))
        elif name in ("LRT", "p", "p_Holm"):
            flat_contents.append(min_column(column_data)[0])
        elif name.strip('\n') == "BranchLength":
            # the last header cell still carries its newline
            flat_contents.append(sum_column(column_data)[0])
        else:
            flat_contents.append(mean_column(column_data)[0])
    return [header, ','.join(str(value) for value in flat_contents)]
def analyze_csv_sig_branches(contents):
    """Return the branch names (first CSV field) of every row whose
    second-to-last field — the corrected p-value — is below 0.05."""
    significant = []
    for row in contents:
        fields = row.split(',')
        if float(fields[-2]) < 0.05:
            significant.append(fields[0])
    return significant
def append_csv( buffer,
                filename,
                whole_tree):
    """Read the CSV at *filename*, extend *buffer* with its lines in place,
    and return the list of branches under significant positive selection.

    When whole_tree is True the per-branch rows are first collapsed by
    flatten_csv to a single summary row.  Returns [] for an empty file.
    """
    # BUGFIX: use a context manager — the file handle was never closed.
    with open(filename, 'r') as handle:
        contents = handle.readlines()
    if not contents:
        return []
    to_add = contents
    # There is a header, doesn't need to be analyzed
    sig_list = analyze_csv_sig_branches(contents[1:])
    if whole_tree:
        # The header ought not be flattened, but it is useful for guiding
        # the flattening; flatten_csv may return None for an empty csv.
        to_add = flatten_csv(filename, contents)
    if to_add:
        buffer += to_add
    return sig_list
def append_column(buffer, column):
    """Append column[i] as a new CSV field to line i of *buffer*, mutating
    and returning the buffer."""
    for idx in range(len(buffer)):
        # strip the newline, attach the new field, restore the newline
        stripped = buffer[idx].strip("\n")
        buffer[idx] = "%s,%s\n" % (stripped, column[idx])
    return buffer
def concat_buffers(buffer1, buffer2):
    """Concatenate two CSV line buffers.

    When buffer1 is non-empty it already carries the header, so buffer2's
    header row is dropped (unless buffer2 is a single line), and buffer1's
    last line is newline-terminated so the buffers do not fuse into one
    line.  When buffer1 is empty, buffer2 is returned whole.
    """
    if len(buffer1) != 0:
        if len(buffer2) != 1:
            buffer2 = buffer2[1:]
        # BUGFIX: the old check compared a *list* (split(',')[-2:]) against
        # the string '\n', which is always unequal, so a newline was
        # appended unconditionally — leaving blank lines in the output.
        if not buffer1[-1].endswith('\n'):
            buffer1[-1] += '\n'
    return buffer1 + buffer2
def rep_to_column(rep, key, order):
    """Extract rep[branch][key] for every branch in *order*, returned as a
    column list headed by *key* itself."""
    return [key] + [rep[branch][key] for branch in order]
def rep_to_csv(rep):
    """Convert a {branch: {field: value}} rep into a list-of-rows table
    headed by ["Branch", field...].

    NOTE(review): two quirks are preserved from the original: the table
    keeps a leftover empty row right after the header, and for fields
    already in the header the value is inserted at header.index(field),
    which is one slot left of the field's header column (the branch name
    occupies slot 0).  The local table is no longer named ``csv``, which
    shadowed the module import.
    """
    table = [[]]
    header = ["Branch"]
    for branch, fields in rep.items():
        row = [branch]
        for field_name, field_value in fields.items():
            if field_name in header:
                row.insert(header.index(field_name), field_value)
            else:
                header.append(field_name)
                row.append(field_value)
        table.append(row)
    table.insert(0, header)
    return table
def write_buffer(buffer, filename):
    """Write the lines in *buffer* to *filename* and return 0 (kept for
    compatibility with existing callers).

    BUGFIX: use a context manager — the file handle was never closed.
    """
    with open(filename, 'w') as handle:
        handle.writelines(buffer)
    return 0
# prefixes are a list of filenames that start sets "longPy.279" etc.
def run_batch( buffer,
               prefixes,
               whole_tree):
    """Process every file in *prefixes*, appending its (optionally
    flattened) CSV contents to *buffer*.

    Returns (buffer, sig_branch_dict, sig_tree_count) where sig_branch_dict
    maps branch name -> number of trees in which that branch was found
    significant, and sig_tree_count counts trees with at least one
    significant branch.
    """
    sig_branch_dict = {}
    sig_tree_count = 0
    for csv_filename in prefixes:
        tree_buffer = []
        branch_hits = append_csv( tree_buffer,
                                  csv_filename,
                                  whole_tree)
        # Accounting: tally significant trees and per-branch hit counts.
        if len(branch_hits) > 0:
            sig_tree_count += 1
        for branch in branch_hits:
            sig_branch_dict[branch] = sig_branch_dict.get(branch, 0) + 1
        buffer = concat_buffers(buffer, tree_buffer)
    return buffer, sig_branch_dict, sig_tree_count
def get_prefixes(sim_dir):
    """Return the paths of all '*.out' files directly inside *sim_dir*.

    (Removed an unused ``import re`` and the dead, commented-out regex
    filter that was its only prospective user.)
    """
    import glob
    return glob.glob(sim_dir + os.sep + "*.out")
# Command-line entry point: gather every '*.out' file under the input
# directory, convolve them into one CSV (optionally one summary row per
# tree), and write the result to the output file.
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("input", help="input csv file or directory")
    parser.add_argument("output", help="output file")
    # --whole-tree: collapse each tree's per-branch rows to a single row.
    parser.add_argument("--whole-tree",
                        dest="whole_tree",
                        help="treat the tree as a single unit, rather than \
the branches independently",
                        action='store_true')
    # --stats: print significance accounting gathered during the batch run.
    parser.add_argument("--stats",
                        dest="print_stats",
                        help="print the number of branches found to be \
under significant positive selection, the number of \
trees found to contain at least one branch under \
significant positive selection and a list of all \
branches and hit count for each branch found to be \
under significant positive selection",
                        action='store_true')
    args = parser.parse_args()
    buffer = []
    # dir input:
    sim_dir = args.input
    output_filename = args.output
    prefixes = get_prefixes(sim_dir)
    # Bail out early when the input directory holds no simulation output.
    if len(prefixes) == 0:
        print("Error: no valid files found")
        exit(1)
    buffer, sig_branches_dict, sig_tree_count = run_batch( buffer,
                                                           prefixes,
                                                           args.whole_tree)
    if args.print_stats:
        print(len(prefixes), " total trees")
        print( str(sum(sig_branches_dict.values())),
               " significant branches found")
        print(str(sig_tree_count), " significant trees found")
        print("branches and hit count:")
        print(sig_branches_dict)
    write_buffer(buffer, output_filename)