def get_combined_counts(table, positions):
    bases = 'ACGT'
    if isinstance(positions, str):
        counts = reduced_one_position(table, positions)
        mut_counts = counts['M']
        unmut_counts = counts['R']
        positions = [positions]
        states = bases
        header = ['mut', 'base', 'count']
    else:
        counts = reduced_multiple_positions(table, *positions)
        mut_counts = counts['M']
        unmut_counts = counts['R']
        states = product(*list([bases] * len(positions)))
        header = ['mut'] + ['base%d' % (i + 1)
                            for i in range(len(positions))] + ['count']

    combined = []
    for state in states:
        combined.append(['R'] + list(state) + [unmut_counts[state]])
        combined.append(['M'] + list(state) + [mut_counts[state]])

    counts_table = LoadTable(header=header, rows=combined)
    counts_table = counts_table.sorted(columns=header[:-1])
    return counts_table

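# Minimal usage sketch (hypothetical data) for get_combined_counts. The input
# `table` is assumed to be a per-position counts table such as the one built
# by get_count_table; reduced_one_position / reduced_multiple_positions
# collapse it to the requested position(s).
#
#   counts = get_combined_counts(table, 'pos1')
#   # -> columns: mut, base, count (one R and one M row per base)
#   counts = get_combined_counts(table, ['pos0', 'pos1'])
#   # -> columns: mut, base1, base2, count (one R and one M row per dinucleotide)
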
def summary_stat_table(table, factors):
    '''returns summary statistics for classifier, feature set combination'''
    fscore_cols = [c for c in table.header if c.startswith('fscore')]
    distinct = table.distinct_values(factors)
    rows = []
    for comb in tqdm(distinct, ncols=80):
        subtable = table.filtered(lambda x: tuple(x) == tuple(comb),
                                  columns=factors)
        aurocs = numpy.array(subtable.tolist('auc'))
        mean_prec = numpy.array(subtable.tolist('mean_precision'))
        accuracy = numpy.array(subtable.tolist('balanced_accuracy'))
        row = list(comb) + [
            aurocs.mean(), aurocs.std(ddof=1),
            mean_prec.mean(), mean_prec.std(ddof=1),
            accuracy.mean(), accuracy.std(ddof=1)
        ]
        for col in fscore_cols:
            data = numpy.array(subtable.tolist(col))
            row.append(data.mean())
            row.append(data.std(ddof=1))

        rows.append(row)

    header = list(factors) + [
        'mean_auc', 'std_auc', 'mean_ap', 'std_ap',
        'mean_balanced_accuracy', 'std_balanced_accuracy'
    ]
    for col in fscore_cols:
        header.extend([f'mean_{col}', f'std_{col}'])

    table = LoadTable(header=header, rows=rows)
    table = table.sorted(reverse='mean_auc')
    return table

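# Usage sketch: in collate() below, summary_stat_table is called on the
# collated performance table, grouping by classifier/feature-set factors.
# Assuming `table` has auc, mean_precision, balanced_accuracy and fscore(...)
# columns, something like:
#
#   stats = summary_stat_table(table, factors=["algorithm", "name", "size"])
#   # one row per distinct factor combination, with mean/std of each metric
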
def make_strand_symmetric_table(table):
    '''takes a combined counts table and returns a table with reverse
    complemented seqs

    Uses MUTATION_COMPLEMENTS'''
    new_data = []
    direction_index = [i for i in range(len(table.header))
                       if table.header[i] == 'direction'][0]
    for plus, minus in list(MUTATION_COMPLEMENTS.items()):
        plus_table = table.filtered('direction=="%s"' % plus)
        plus_data = add_strand_column(plus_table.tolist(), '+')
        new_data.extend(plus_data)

        minus_table = table.filtered('direction=="%s"' % minus)
        if minus_table.shape[0] == 0:
            continue

        minus_table = _reverse_complement(minus_table)
        minus_data = minus_table.tolist()
        for row in minus_data:
            row[direction_index] = plus

        minus_data = add_strand_column(minus_data, '-')
        new_data.extend(minus_data)

    return LoadTable(header=table.header[:] + ['strand'], rows=new_data)

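# Illustrative sketch: MUTATION_COMPLEMENTS pairs each mutation direction with
# its reverse complement direction (e.g. 'AtoC' with 'TtoG', as exercised in
# test_strandsym_table below). Rows for the plus direction keep their sequence
# and get strand '+'; rows for the paired minus direction are reverse
# complemented, relabelled with the plus direction, and get strand '-'.
#
#   sym = make_strand_symmetric_table(combined_counts_table)  # hypothetical table
#   # header == original header + ['strand']
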
def spectra_table(table, group_label):
    """returns a table with columns without position information"""
    assert 'direction' in table.header
    if 'mut' in table.header:
        # remove redundant category (counts of M == R)
        table = table.filtered("mut=='M'")

    columns = ['count', 'direction', group_label]
    table = table.get_columns(columns)
    # so we have a table with counts per direction
    results = []
    group_categories = table.distinct_values(group_label)
    filter_template = "direction=='%(direction)s' and "\
                      "%(label)s=='%(category)s'"
    for direction in table.distinct_values('direction'):
        start = direction[0]
        for group_category in group_categories:
            condition = dict(direction=direction, label=group_label,
                             category=group_category)
            sub_table = table.filtered(filter_template % condition)
            total = sub_table.summed('count')
            results.append([total, start, direction, group_category])

    result = LoadTable(header=['count', 'start', 'direction', group_label],
                       rows=results)
    return result

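# Usage sketch (hypothetical column values): given a combined counts table
# with 'count', 'direction' (e.g. 'AtoC') and a grouping column, spectra_table
# sums counts per direction within each group and records the starting base.
#
#   spectra = spectra_table(counts_table, 'strand')
#   # -> columns: count, start, direction, strand
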
def test_deserialise_tabular_table(self):
    """correctly deserialises Table"""
    from cogent3 import LoadTable

    table = LoadTable(
        header=["id", "foo", "bar"],
        rows=[
            [1, "abc", 11],
            [2, "bca", 22],
            [3, "cab", 33],
            [4, "abc", 44],
            [5, "bca", 55],
        ],
    )
    json = table.to_json()
    got = deserialise_object(json)
    self.assertEqual(got.to_dict(), table.to_dict())

def get_grouped_combined_counts(table, position, group_label):
    """wraps motif_count.get_combined_counts for groups"""
    group_cats = table.distinct_values(group_label)
    all_data = []
    header = None
    for category in group_cats:
        subtable = table.filtered(lambda x: x == category,
                                  columns=group_label)
        counts = motif_count.get_combined_counts(subtable, position)
        if header is None:
            header = [group_label] + list(counts.header)

        counts = counts.with_new_column(group_label, lambda x: category,
                                        columns=counts.header[0])
        all_data.extend(counts.tolist(header))

    counts = LoadTable(header=header, rows=all_data)
    # sorted() returns a new table, so the result must be assigned
    counts = counts.sorted(columns=[group_label, 'mut'])
    return counts

def test_reverse_complement(self):
    table = LoadTable(header=self.header, rows=self.data)
    ex = [[1670, 'A', 'A', 'A', 'A', 'M', 'AtoC'],
          [557, 'G', 'T', 'T', 'C', 'M', 'AtoC'],
          [1479, 'T', 'T', 'C', 'T', 'M', 'AtoC'],
          [925, 'C', 'T', 'T', 'C', 'M', 'AtoC'],
          [1919, 'T', 'T', 'G', 'T', 'M', 'AtoC'],
          [442, 'T', 'G', 'T', 'C', 'M', 'AtoC']]
    got = _reverse_complement(table)
    raw_got = got.tolist()
    self.assertEqual(raw_got, ex)

def load_table_from_delimited_file(path, sep='\t'):
    '''returns a Table object, loading the delimited file faster than the
    generic LoadTable parser by only casting the count column'''
    with open_(path, 'rt') as infile:
        header = infile.readline().strip().split(sep)
        count_index = header.index('count')
        records = []
        for line in infile:
            line = line.strip().split(sep)
            line[count_index] = int(line[count_index])
            records.append(line)

        table = LoadTable(header=header, rows=records)

    return table

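# Usage sketch (hypothetical path): reads a tab delimited counts file whose
# header includes a 'count' column.
#
#   table = load_table_from_delimited_file('combined_counts.txt', sep='\t')
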
def missing_species_names(names):
    """returns a Table of missing species names, or None"""
    missing = []
    for name in names:
        n = Species.get_species_name(name)
        if n == "None":
            missing.append([name])

    if missing:
        result = LoadTable(header=["MISSING SPECIES"], rows=missing)
    else:
        result = None

    return result

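# Usage sketch (hypothetical names): any name for which
# Species.get_species_name returns the string "None" is reported.
#
#   table = missing_species_names(['Homo sapiens', 'Not a species'])
#   # -> one-column "MISSING SPECIES" Table listing 'Not a species', or None
#   #    if every name was resolved
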
def display_available_dbs(account, release=None):
    """displays the available Ensembl databases at the nominated host"""
    db_list = get_db_name(account=account, db_type="core", release=release)
    db_list += get_db_name(account=account, db_type="compara",
                           release=release)
    rows = []
    for db_name in db_list:
        species_name = db_name.species
        if species_name:
            common_name = Species.get_common_name(db_name.species,
                                                  level="ignore")

        if "compara" in db_name.name:
            species_name = common_name = "-"

        rows.append([db_name.release, db_name.name, species_name,
                     common_name])

    table = LoadTable(header=["Release", "Db Name", "Species", "Common Name"],
                      rows=rows, space=2)
    table = table.sorted(["Release", "Db Name"])
    table.legend = (
        "Values of 'None' indicate cogent does not have a value for that "
        "database name."
    )
    return table

def test_all_counts(self):
    """exercising all_counts"""
    if os.path.exists(self.dirname):
        shutil.rmtree(self.dirname)

    runner = CliRunner()
    # should fail, as data files not in this directory
    r = runner.invoke(all_count_main,
                      ["-cdata/*.txt", "-o%s" % self.dirname])
    self.assertNotEqual(r.exit_code, 0)

    r = runner.invoke(all_count_main,
                      ["-cdata/directions/*.txt", "-o%s" % self.dirname])
    # should produce directory containing two files
    dirlist = os.listdir(self.dirname)
    self.assertEqual(set(dirlist),
                     set(["combined_counts.txt", "combined_counts.log"]))
    # check the contents of combined_counts
    counts = LoadTable(os.path.join(self.dirname, "combined_counts.txt"),
                       sep="\t")
    # 4**4 nbrs x 12 mutations x 2 (M/R groups) = 6144
    self.assertEqual(counts.shape[0], 6144)
    shutil.rmtree(self.dirname)

def get_ms_supp_labels(float_type, texdir="../ENU-ms-genetics-v2",
                       verbose=False):
    """returns ordered dicts of labels from the manuscript for supplementary
    and main manuscript body"""
    # assert float_type in ('fig', 'tab')
    # hardcoding these, in the manuscript order of the sections
    texfns = [
        os.path.join(texdir, tfn)
        for tfn in (
            "MS-introduction.tex",
            "MS-results.tex",
            "MS-discussion.tex",
            "MS-methods.tex",
        )
    ]
    alllabels = None
    for tfn in texfns:
        tags = get_tags(tfn, "label")
        if alllabels is None:
            alllabels = tags
        else:
            alllabels.update(tags)

    print("\n\nWorking on labels")
    alllabels = filtertags(float_type, alllabels)

    allrefs = None
    for tfn in texfns:
        tags = get_tags(tfn, "ref")
        if allrefs is None:
            allrefs = tags
        else:
            allrefs.update(tags)

    print("\n\nWorking on refs")
    allrefs = filtertags(float_type, allrefs)
    mainrefs = filtertags(lambda x: not x.startswith("sup"), allrefs)
    suprefs = filtertags(lambda x: x.startswith("sup"), allrefs)
    missing = set(alllabels) - set(mainrefs)
    rows = [(missed, alllabels[missed]) for missed in missing]
    table = LoadTable(header=["label missing", "referenced in"], rows=rows)
    if verbose:
        print(table)

    return mainrefs, suprefs

def _reverse_complement(table):
    '''returns a table with sequences reverse complemented'''
    pos_indices = [i for i, c in enumerate(table.header)
                   if c.startswith('pos')]
    rows = table.tolist()
    for row in rows:
        # use a cogent3 DNA sequence object to do the reverse complementing
        seq = DNA.make_seq(''.join(row[i] for i in pos_indices))
        seq = list(seq.rc())
        for i, index in enumerate(pos_indices):
            row[index] = seq[i]

    if rows:
        new = LoadTable(header=table.header, rows=rows)
    else:
        new = None

    return new

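# Worked example: for a row whose pos0..pos3 columns spell 'CAGT', the reverse
# complement is 'ACTG', so the row's position columns become
# ['A', 'C', 'T', 'G']; non-position columns are left untouched.
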
def test_aln_to_counts(self):
    """exercising aln_to_counts"""
    if os.path.exists(self.dirname):
        shutil.rmtree(self.dirname)

    makedirs(self.dirname)
    runner = CliRunner()
    # should succeed and write the counts table plus a run log
    r = runner.invoke(aln_to_counts_main,
                      ["-adata/sample_AtoC.fasta",
                       "-o%s" % self.dirname,
                       "-f1",
                       "--direction=AtoC",
                       "-S111",
                       "-F"])
    dirlist = os.listdir(self.dirname)
    self.assertEqual(r.exit_code, 0)
    self.assertEqual(set(dirlist),
                     set(["sample_AtoC.txt", "sample_AtoC.log"]))
    counts = LoadTable(os.path.join(self.dirname, "sample_AtoC.txt"),
                       sep="\t")
    # flank size 1 gives two positions, so 4**2 motifs x 2 (M/R) groups = 32 rows
    self.assertEqual(counts.shape[0], 32)
    shutil.rmtree(self.dirname)

def status(configpath):
    """checks download/install status using checkpoint files and config"""
    release, remote_path, local_path, species_dbs = read_config(configpath)
    content = os.listdir(local_path)
    dbnames = reduce_dirnames(content, species_dbs)
    rows = []
    for db in dbnames:
        row = [
            db.name,
            is_downloaded(local_path, db.name),
            is_installed(local_path, db.name),
        ]
        rows.append(row)

    table = LoadTable(
        header=["dbname", "Downloaded", "Installed"],
        rows=rows,
        title="Status of download and install",
        legend="config=%s; local_path=%s" % (configpath.name, local_path),
    )
    print(table)

def get_count_table(observed, control, k=None):
    """return table of motif counts

    Each motif position is a separate column. All possible DNA motifs of
    length k are included.

    Arguments:
        - observed: the observed counts as {seq: count}
        - control: the control counts as {seq: count}
        - k: size of the motif"""
    rows = []
    lengths = set(
        list(map(len, list(observed.keys()))) +
        list(map(len, list(control.keys()))))
    if len(lengths) != 1:
        raise ValueError("Motifs not all same length: %s" % str(lengths))

    length = list(lengths)[0]
    if k and length != k:
        raise ValueError("k[%d] doesn't match motif length [%d]" % (k, length))
    elif k is None:
        k = length

    states = list(set(observed.keys()) | set(control.keys()))
    states.sort()

    for state in states:
        state = ''.join(state)
        control_counts = control[state]
        observed_counts = observed[state]
        if control_counts == observed_counts == 0:
            # we skip unobserved states
            continue

        rows.append([control_counts] + list(state) + ['R'])
        rows.append([observed_counts] + list(state) + ['M'])

    header = ['count'] + ["pos%d" % i for i in range(k)] + ['mut']
    table = LoadTable(header=header, rows=rows)
    return table

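# Minimal worked example (hypothetical counts): control rows are labelled 'R'
# and observed rows 'M'; motifs unobserved in both inputs are dropped.
#
#   observed = {'AA': 2, 'AC': 0}
#   control = {'AA': 5, 'AC': 0}
#   tab = get_count_table(observed, control, k=2)
#   # header: count, pos0, pos1, mut
#   # rows:   [5, 'A', 'A', 'R'], [2, 'A', 'A', 'M']   ('AC' is skipped)
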
def _parse_db_display(output, columns):
    """finds the table display and accumulates the content"""
    result = output.splitlines()
    header = []
    for index, line in enumerate(result):
        if not header and columns[0] in line:
            header = columns
            break

    if header:
        rows = []
        for i in range(index + 2, len(result)):
            line = result[i].strip()
            if line.startswith("----------"):
                break

            line = line.split()
            rows.append(line[:len(columns)])

        table = LoadTable(header=header, rows=rows)
    else:
        table = None

    return table

def test_strandsym_table(self):
    """makes strand symmetric table"""
    data = [[1, 'T', 'T', 'T', 'T', 'M', 'TtoG'],
            [1, 'G', 'A', 'A', 'C', 'M', 'TtoG'],
            [1, 'A', 'G', 'A', 'A', 'M', 'TtoG'],
            [1, 'G', 'A', 'A', 'G', 'M', 'TtoG'],
            [1, 'A', 'C', 'A', 'A', 'M', 'TtoG'],
            [1, 'G', 'A', 'C', 'A', 'M', 'TtoG']]
    exp = []
    for row in self.data:
        n = row[:]
        n.append('+')
        exp.append(n)

    for row in data:
        seq = list(map(DNA.complement, row[1:-2]))
        seq.reverse()
        n = [row[0]] + seq + ['M', 'AtoC']
        n.append('-')
        exp.append(n)

    table = LoadTable(header=self.header, rows=self.data + data)
    r = make_strand_symmetric_table(table)
    self.assertEqual(r.tolist(), exp)

def single_group(counts_table, outpath, group_label, group_ref, positions,
                 plot_config, first_order, dry_run):
    # Collect statistical analysis results
    summary = []
    max_results = {}

    # Single position analysis
    print("Doing single position analysis")
    single_results = single_position_effects(counts_table, positions,
                                             group_label=group_label)
    summary += make_summary(single_results)

    max_results[1] = max(single_results[p]['rel_entropy']
                         for p in single_results)
    if not dry_run:
        outfilename = os.path.join(outpath, "1.json")
        util.dump_loglin_stats(single_results, outfilename)
        LOGGER.output_file(outfilename, label="analysis1")

    fig = get_single_position_fig(
        single_results, positions,
        plot_config.get('1-way plot', 'figsize'),
        group_label=group_label, group_ref=group_ref,
        figwidth=plot_config.get('1-way plot', 'figwidth'),
        xlabel_fontsize=plot_config.get('1-way plot', 'xlabel_fontsize'),
        ylabel_fontsize=plot_config.get('1-way plot', 'ylabel_fontsize'),
        xtick_fontsize=plot_config.get('1-way plot', 'xtick_fontsize'),
        ytick_fontsize=plot_config.get('1-way plot', 'ytick_fontsize'))

    format_offset(fig, int(plot_config.get('1-way plot',
                                           'ytick_fontsize') * .8))
    if not dry_run:
        outfilename = os.path.join(outpath, "1.pdf")
        fig.savefig(outfilename, bbox_inches='tight')
        print("Wrote", outfilename)

    fig.clf()  # refresh for next section

    if first_order:
        msg = "Done! Check %s for your results" % outpath
        summary = LoadTable(header=['Position', 'RE', 'Deviance', 'df',
                                    'prob', 'formula'],
                            rows=summary, digits=2, space=2)
        if not dry_run:
            outfilename = os.path.join(outpath, "summary.txt")
            summary.write(outfilename, sep='\t')
            LOGGER.output_file(outfilename, label="summary")
        return msg

    print("Doing two positions analysis")
    results = get_two_position_effects(counts_table, positions,
                                       group_label=group_label)
    summary += make_summary(results)

    max_results[2] = max(results[p]['rel_entropy'] for p in results)
    if not dry_run:
        outfilename = os.path.join(outpath, "2.json")
        util.dump_loglin_stats(results, outfilename)
        LOGGER.output_file(outfilename, label="analysis2")

    fig = get_two_position_fig(results, positions,
                               plot_config.get('2-way plot', 'figsize'),
                               group_label=group_label, group_ref=group_ref,
                               xtick_fontsize=plot_config.get(
                                   '2-way plot', 'xtick_fontsize'),
                               ytick_fontsize=plot_config.get(
                                   '2-way plot', 'ytick_fontsize'))
    fig.set_figwidth(plot_config.get('2-way plot', 'figwidth'))
    x_fsz = plot_config.get('2-way plot', 'xlabel_fontsize')
    y_fsz = plot_config.get('2-way plot', 'ylabel_fontsize')
    fig.text(0.5, plot_config.get('2-way plot', 'xlabel_pad'), 'Position',
             ha='center', va='center', fontsize=x_fsz)
    fig.text(plot_config.get('2-way plot', 'ylabel_pad'), 0.5, 'RE',
             ha='center', va='center', rotation='vertical', fontsize=y_fsz)
    format_offset(fig, int(plot_config.get('2-way plot',
                                           'ytick_fontsize') * .8))
    if not dry_run:
        outfilename = os.path.join(outpath, "2.pdf")
        fig.savefig(outfilename, bbox_inches='tight')
        print("Wrote", outfilename)

    fig.clf()  # refresh for next section

    print("Doing three positions analysis")
    results = get_three_position_effects(counts_table, positions,
                                         group_label=group_label)
    summary += make_summary(results)

    max_results[3] = max(results[p]['rel_entropy'] for p in results)
    if not dry_run:
        outfilename = os.path.join(outpath, "3.json")
        util.dump_loglin_stats(results, outfilename)
        LOGGER.output_file(outfilename, label="analysis3")

    fig = get_three_position_fig(results, positions,
                                 plot_config.get('3-way plot', 'figsize'),
                                 group_label=group_label,
                                 group_ref=group_ref,
                                 xtick_fontsize=plot_config.get(
                                     '3-way plot', 'xtick_fontsize'),
                                 ytick_fontsize=plot_config.get(
                                     '3-way plot', 'ytick_fontsize'))
    fig.set_figwidth(plot_config.get('3-way plot', 'figwidth'))
    x_fsz = plot_config.get('3-way plot', 'xlabel_fontsize')
    y_fsz = plot_config.get('3-way plot', 'ylabel_fontsize')
    fig.text(0.5, plot_config.get('3-way plot', 'xlabel_pad'), 'Position',
             ha='center', va='center', fontsize=x_fsz)
    fig.text(plot_config.get('3-way plot', 'ylabel_pad'), 0.5, 'RE',
             ha='center', va='center', rotation='vertical', fontsize=y_fsz)
    format_offset(fig, int(plot_config.get('3-way plot',
                                           'ytick_fontsize') * .8))
    if not dry_run:
        outfilename = os.path.join(outpath, "3.pdf")
        fig.savefig(outfilename, bbox_inches='tight')
        print("Wrote", outfilename)

    fig.clf()  # refresh for next section

    print("Doing four positions analysis")
    results = get_four_position_effects(counts_table, positions,
                                        group_label=group_label)
    summary += make_summary(results)

    max_results[4] = max(results[p]['rel_entropy'] for p in results)
    if not dry_run:
        outfilename = os.path.join(outpath, "4.json")
        util.dump_loglin_stats(results, outfilename)
        LOGGER.output_file(outfilename, label="analysis4")

    fig = get_four_position_fig(results, positions,
                                plot_config.get('4-way plot', 'figsize'),
                                group_label=group_label, group_ref=group_ref)
    fig.set_figwidth(plot_config.get('4-way plot', 'figwidth'))
    ax = fig.gca()
    x_fsz = plot_config.get('4-way plot', 'xlabel_fontsize')
    y_fsz = plot_config.get('4-way plot', 'ylabel_fontsize')
    ax.set_xlabel('Position', fontsize=x_fsz)
    ax.set_ylabel('RE', fontsize=y_fsz)
    format_offset(fig, int(plot_config.get('4-way plot',
                                           'ytick_fontsize') * .8))
    if not dry_run:
        outfilename = os.path.join(outpath, "4.pdf")
        fig.savefig(outfilename, bbox_inches='tight')
        print("Wrote", outfilename)

    fig.clf()  # refresh for next section

    # now generate summary plot
    bar_width = 0.5
    index = numpy.arange(4)
    y_lim = max(max_results.values())
    y_fmt = util.FixedOrderFormatter(numpy.floor(numpy.log10(y_lim)))

    fig = pyplot.figure(figsize=plot_config.get('summary plot', 'figsize'))
    ax = fig.gca()
    ax.yaxis.set_major_formatter(y_fmt)

    bar = pyplot.bar(index, [max_results[i] for i in range(1, 5)], bar_width)
    pyplot.xticks(index + (bar_width / 2.), list(range(1, 5)),
                  fontsize=plot_config.get('summary plot', 'xtick_fontsize'))
    x_sz = plot_config.get('summary plot', 'xlabel_fontsize')
    y_sz = plot_config.get('summary plot', 'ylabel_fontsize')
    ax.set_xlabel("Effect Order", fontsize=x_sz)
    ax.set_ylabel("RE$_{max}$", fontsize=y_sz)

    x_sz = plot_config.get('summary plot', 'xtick_fontsize')
    y_sz = plot_config.get('summary plot', 'ytick_fontsize')
    ax.tick_params(axis='x', labelsize=x_sz, pad=x_sz // 2, length=0)
    ax.tick_params(axis='y', labelsize=y_sz, pad=y_sz // 2)
    format_offset(fig, int(plot_config.get('summary plot',
                                           'ytick_fontsize') * .8))
    if not dry_run:
        outfilename = os.path.join(outpath, "summary.pdf")
        pyplot.savefig(outfilename, bbox_inches='tight')
        print("Wrote", outfilename)

    summary = LoadTable(header=['Position', 'RE', 'Deviance', 'df',
                                'prob', 'formula'],
                        rows=summary, digits=2, space=2)
    if not dry_run:
        outfilename = os.path.join(outpath, "summary.txt")
        summary.write(outfilename, sep='\t')
        LOGGER.output_file(outfilename, label="summary")

    print(summary)

    pyplot.close('all')
    msg = "Done! Check %s for your results" % outpath
    return msg

def main(countsfile, outpath, countsfile2, strand_symmetry, force_overwrite,
         dry_run, verbose):
    args = locals()

    table = LoadTable(countsfile, sep='\t')
    if not dry_run:
        log_file_path = os.path.join(util.abspath(outpath),
                                     'spectra_analysis.log')
        LOGGER.log_file_path = log_file_path
        LOGGER.log_message(str(args), label='vars')
        LOGGER.input_file(countsfile)

    # if there's a strand symmetry argument then we don't need a second file
    if strand_symmetry:
        group_label = 'strand'
        counts_table = util.spectra_table(table, group_label)

    if not strand_symmetry:
        group_label = 'group'

        # be sure there's two files
        counts_table2 = LoadTable(countsfile2, sep='\t')
        LOGGER.input_file(countsfile2)
        counts_table2 = counts_table2.with_new_column(
            'group', lambda x: '2', columns=counts_table2.header[0])
        counts_table1 = table.with_new_column(
            'group', lambda x: '1', columns=table.header[0])

        counts_table1 = util.spectra_table(counts_table1, group_label)
        counts_table2 = util.spectra_table(counts_table2, group_label)

        # now combine
        header = ['group'] + counts_table2.header[:-1]
        raw1 = counts_table1.tolist(header)
        raw2 = counts_table2.tolist(header)
        counts_table = LoadTable(header=header, rows=raw1 + raw2)

    if verbose:
        print(counts_table)

    # spectra table has [count, start, direction, group] order
    # we reduce comparisons to a start base
    results = []
    saveable = {}
    for start_base in counts_table.distinct_values('start'):
        subtable = counts_table.filtered('start == "%s"' % start_base)
        columns = [c for c in counts_table.header if c != 'start']
        subtable = subtable.get_columns(columns)
        total_re, dev, df, collated, formula = log_lin.spectra_difference(
            subtable, group_label)
        r = [list(x) for x in collated.to_records(index=False)]

        if not strand_symmetry:
            grp_labels = {'1': countsfile, '2': countsfile2}
            grp_index = list(collated.columns).index('group')
            for row in r:
                row[grp_index] = grp_labels[row[grp_index]]

        p = chisqprob(dev, df)
        if p < 1e-6:
            prob = "%.2e" % p
        else:
            prob = "%.6f" % p

        for row in r:
            row.insert(0, start_base)
            row.append(prob)

        results += r

        significance = ["RE=%.6f" % total_re, "Dev=%.2f" % dev, "df=%d" % df,
                        "p=%s" % p]
        stats = " : ".join(significance)
        print("Start base=%s %s" % (start_base, stats))
        saveable[start_base] = dict(rel_entropy=total_re, deviance=dev,
                                    df=df, prob=p, formula=formula,
                                    stats=collated.to_json())

    table = LoadTable(header=['start_base'] + list(collated.columns) + ['prob'],
                      rows=results, digits=5).sorted(columns='ret')
    json_path = None

    outpath = util.abspath(outpath)
    if not dry_run:
        util.makedirs(outpath)
        json_path = os.path.join(outpath, 'spectra_analysis.json')
        dump_json(saveable, json_path)
        LOGGER.output_file(json_path)
        table_path = os.path.join(outpath, 'spectra_summary.txt')
        table.write(table_path, sep='\t')
        LOGGER.output_file(table_path)
        LOGGER.log_message(str(significance), label="significance")

def main(counts_pattern, output_path, strand_symmetric, split_dir, dry_run,
         force_overwrite):
    """export tab delimited combined counts table by appending the 12 mutation
    direction tables, adding a new column ``direction``."""
    args = locals()

    output_path = abspath(output_path)
    if strand_symmetric and split_dir:
        split_dir = abspath(split_dir)
    else:
        split_dir = None

    # check the glob pattern produces the correct number of files
    counts_files = glob.glob(counts_pattern)
    check_found_filenames(counts_files)

    counts_filename = os.path.join(output_path, 'combined_counts.txt')
    runlog_path = os.path.join(output_path, "combined_counts.log")

    if not dry_run:
        if not force_overwrite and (os.path.exists(counts_filename) or
                                    os.path.exists(runlog_path)):
            msg = "Either %s or %s already exist. Force overwrite of "\
                  "existing files with -F."
            raise ValueError(msg % (counts_filename, runlog_path))

        makedirs(output_path)
        if split_dir:
            makedirs(split_dir)

        LOGGER.log_file_path = runlog_path
        LOGGER.log_message(str(args), label='vars')
        for fn in counts_files:
            LOGGER.input_file(fn, label="count_file")

    start_time = time.time()

    # run the program
    all_counts = []
    header = None
    num_rows = 0
    basenames = []
    for fn in counts_files:
        basenames.append(os.path.basename(fn))
        mutation = direction.findall(fn)[0]
        table = LoadTable(fn, sep='\t')
        if header is None:
            header = list(table.header)
            header.append('direction')
            num_rows = table.shape[0]

        data = table.tolist()
        new = []
        for row in data:
            row.append(mutation)
            new.append(row)

        all_counts += new

    table = LoadTable(header=header, rows=all_counts)

    if strand_symmetric:
        table = make_strand_symmetric_table(table)

    if split_dir:
        group_subtables = get_subtables(table, group_label='direction')

    if not dry_run:
        table.write(counts_filename, sep='\t')
        LOGGER.output_file(counts_filename)

        if split_dir:
            for group, subtable in group_subtables:
                # we first assume that group is part of the filenames!
                fn = [bn for bn in basenames if group in bn]
                if len(fn) == 1:
                    fn = fn[0]
                else:
                    fn = "%s.txt" % group

                counts_filename = os.path.join(split_dir, fn)
                subtable.write(counts_filename, sep='\t')
                LOGGER.output_file(counts_filename)

    # determine runtime
    duration = time.time() - start_time
    if not dry_run:
        LOGGER.log_message("%.2f" % (duration / 60.),
                           label="run duration (minutes)")

    print("Done!")

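# Hedged CLI sketch, based only on the flags exercised in test_all_counts
# above (-c for the counts glob pattern, -o for the output directory;
# the command name is assumed from the all_count_main import in the tests):
#
#   all_count -c "data/directions/*.txt" -o results
#   # writes results/combined_counts.txt and results/combined_counts.log
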
def nbr(countsfile, outpath, countsfile2, first_order, strand_symmetry,
        group_label, group_ref, plot_cfg, no_type3, format, verbose, dry_run):
    '''log-linear analysis of neighbouring base influence on point mutation

    Writes estimated statistics, figures and a run log to the specified
    directory outpath.

    See documentation for count table format requirements.
    '''
    if no_type3:
        util.exclude_type3_fonts()

    args = locals()

    outpath = util.abspath(outpath)

    if not dry_run:
        util.makedirs(outpath)
        runlog_path = os.path.join(outpath, "analysis.log")
        LOGGER.log_file_path = runlog_path
        LOGGER.log_message(str(args), label='vars')

    counts_filename = util.abspath(countsfile)
    counts_table = util.load_table_from_delimited_file(counts_filename,
                                                       sep='\t')

    LOGGER.input_file(counts_filename, label="countsfile1_path")

    positions = [c for c in counts_table.header if c.startswith('pos')]
    if not first_order and len(positions) != 4:
        raise ValueError("Requires four positions for analysis")

    group_label = group_label or None
    group_ref = group_ref or None
    if strand_symmetry:
        group_label = 'strand'
        group_ref = group_ref or '+'
        if group_label not in counts_table.header:
            print("ERROR: no column named 'strand', exiting.")
            exit(-1)

    if countsfile2:
        print("Performing 2 group analysis")
        group_label = group_label or 'group'
        group_ref = group_ref or '1'
        counts_table1 = counts_table.with_new_column(
            group_label, lambda x: '1', columns=counts_table.header[0])

        fn2 = util.abspath(countsfile2)
        counts_table2 = util.load_table_from_delimited_file(fn2, sep='\t')

        LOGGER.input_file(fn2, label="countsfile2_path")

        counts_table2 = counts_table2.with_new_column(
            group_label, lambda x: '2', columns=counts_table2.header[0])
        # now combine
        header = [group_label] + counts_table2.header[:-1]
        raw1 = counts_table1.tolist(header)
        raw2 = counts_table2.tolist(header)
        counts_table = LoadTable(header=header, rows=raw1 + raw2)

        if not dry_run:
            outfile = os.path.join(outpath, 'group_counts_table.txt')
            counts_table.write(outfile, sep='\t')
            LOGGER.output_file(outfile, label="group_counts")

    if dry_run or verbose:
        print()
        print(counts_table)
        print()

    plot_config = util.get_plot_configs(cfg_path=plot_cfg)

    msg = single_group(counts_table, outpath, group_label, group_ref,
                       positions, plot_config, first_order, dry_run)
    print(msg)

def collate(base_path, output_path, exclude_paths, overwrite):
    """collates all classifier performance stats and writes to a single
    tsv file"""
    LOGGER.log_args()
    outpath = os.path.join(output_path, "collated.tsv.gz")
    logfile_path = os.path.join(output_path, "collated.log")
    if os.path.exists(outpath) and not overwrite:
        click.secho(f"Skipping. {outpath} exists. "
                    "Use overwrite to force.", fg='green')
        exit(0)

    stat_fns = exec_command(f'find {base_path} -name'
                            ' "*performance.json*"')
    stat_fns = stat_fns.splitlines()
    if not stat_fns:
        msg = f'No files matching "*performance.json*" in {base_path}'
        click.secho(msg, fg='red')
        return

    LOGGER.log_file_path = logfile_path

    records = []
    keys = set()
    exclude_paths = [] if exclude_paths is None else exclude_paths.split(',')
    num_skipped = 0
    for fn in tqdm(stat_fns, ncols=80):
        if skip_path(exclude_paths, fn):
            num_skipped += 1
            LOGGER.log_message(fn, label="SKIPPED FILE")
            continue

        LOGGER.input_file(fn)
        data = load_json(fn)
        labels = data['classification_report']['labels']
        fscores = data['classification_report']['f-score']
        row = {
            "stat_path": fn,
            "classifier_path": data["classifier_path"],
            "auc": data["auc"],
            "algorithm": data["classifier_label"],
            "mean_precision": data["mean_precision"],
            f"fscore({labels[0]})": fscores[0],
            f"fscore({labels[1]})": fscores[1],
            'balanced_accuracy': data['balanced_accuracy']
        }
        row.update(data["feature_params"])
        keys.update(row.keys())
        records.append(row)

    columns = sorted(keys)
    rows = list(map(lambda r: [r.get(c, None) for c in columns], records))
    table = LoadTable(header=columns, rows=rows)
    table = table.sorted(reverse="auc")
    table = table.with_new_column(
        "name",
        lambda x: model_name_from_features(*x),
        columns=["flank_size", "feature_dim", "usegc", "proximal"])
    table = table.with_new_column("size", sample_size_from_path,
                                  columns="classifier_path")
    table.write(outpath)
    LOGGER.output_file(outpath)

    # make summary statistics via grouping by factors
    factors = [
        "algorithm", "name", "flank_size", "feature_dim", "proximal",
        "usegc", "size"
    ]
    summary = summary_stat_table(table, factors=factors)
    outpath = os.path.join(output_path, "summary_statistics.tsv.gz")
    summary.write(outpath)
    LOGGER.output_file(outpath)

    if num_skipped:
        click.secho("Skipped %d files that matched exclude_paths" %
                    num_skipped, fg='red')