import os
import sys

import click

from tqdm import tqdm

# NOTE: LoadTable, LOGGER, util, log_lin, dump_json, load_json,
# exec_command, skip_path, model_name_from_features, sample_size_from_path
# and summary_stat_table are provided by this package and its dependencies;
# their import lines are not part of this excerpt.


def main(countsfile, outpath, countsfile2, strand_symmetry, force_overwrite,
         dry_run, verbose):
    """log-linear analysis of spectra differences between groups"""
    args = locals()
    table = LoadTable(countsfile, sep='\t')
    if not dry_run:
        log_file_path = os.path.join(util.abspath(outpath),
                                     'spectra_analysis.log')
        LOGGER.log_file_path = log_file_path
        LOGGER.log_message(str(args), label='vars')

    LOGGER.input_file(countsfile)

    # if strand symmetry is requested, the comparison is within countsfile
    # and no second file is needed
    if strand_symmetry:
        group_label = 'strand'
        counts_table = util.spectra_table(table, group_label)

    if not strand_symmetry:
        group_label = 'group'

        # without strand symmetry, a second counts file is required
        counts_table2 = LoadTable(countsfile2, sep='\t')
        LOGGER.input_file(countsfile2)
        counts_table2 = counts_table2.with_new_column(
            'group', lambda x: '2', columns=counts_table2.header[0])
        counts_table1 = table.with_new_column(
            'group', lambda x: '1', columns=table.header[0])
        counts_table1 = util.spectra_table(counts_table1, group_label)
        counts_table2 = util.spectra_table(counts_table2, group_label)

        # now combine the two tables into one
        header = ['group'] + counts_table2.header[:-1]
        raw1 = counts_table1.tolist(header)
        raw2 = counts_table2.tolist(header)
        counts_table = LoadTable(header=header, rows=raw1 + raw2)

    if verbose:
        print(counts_table)

    # spectra table has [count, start, end, group] column order;
    # we reduce comparisons to a start base
    results = []
    saveable = {}
    for start_base in counts_table.distinct_values('start'):
        subtable = counts_table.filtered('start == "%s"' % start_base)
        columns = [c for c in counts_table.header if c != 'start']
        subtable = subtable.get_columns(columns)
        total_re, dev, df, collated, formula = log_lin.spectra_difference(
            subtable, group_label)
        r = [list(x) for x in collated.to_records(index=False)]

        if not strand_symmetry:
            # replace the synthetic group labels with the source file names
            grp_labels = {'1': countsfile, '2': countsfile2}
            grp_index = list(collated.columns).index('group')
            for row in r:
                row[grp_index] = grp_labels[row[grp_index]]

        # p-value of the deviance under the chi-square distribution
        p = chisqprob(dev, df)
        if p < 1e-6:
            prob = "%.2e" % p
        else:
            prob = "%.6f" % p

        for row in r:
            row.insert(0, start_base)
            row.append(prob)

        results += r

        significance = ["RE=%.6f" % total_re, "Dev=%.2f" % dev,
                        "df=%d" % df, "p=%s" % p]
        stats = " : ".join(significance)
        print("Start base=%s %s" % (start_base, stats))
        saveable[start_base] = dict(rel_entropy=total_re, deviance=dev,
                                    df=df, prob=p, formula=formula,
                                    stats=collated.to_json())

    table = LoadTable(header=['start_base'] + list(collated.columns) +
                      ['prob'],
                      rows=results, digits=5).sorted(columns='ret')
    json_path = None
    outpath = util.abspath(outpath)

    if not dry_run:
        util.makedirs(outpath)
        json_path = os.path.join(outpath, 'spectra_analysis.json')
        dump_json(saveable, json_path)
        LOGGER.output_file(json_path)
        table_path = os.path.join(outpath, 'spectra_summary.txt')
        table.write(table_path, sep='\t')
        LOGGER.output_file(table_path)
        LOGGER.log_message(str(significance), label="significance")
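
# main() assumes a chisqprob(deviance, df) helper. scipy.stats.chisqprob was
# removed in scipy 1.0; if no other dependency supplies the name, a minimal
# drop-in sketch is the chi-square survival function:
from scipy.stats.distributions import chi2


def chisqprob(deviance, df):
    """probability of observing a deviance at least this large by chance"""
    return chi2.sf(deviance, df)
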
def collate(base_path, output_path, exclude_paths, overwrite):
    """collates all classifier performance stats and writes them to a
    single tsv file"""
    LOGGER.log_args()
    outpath = os.path.join(output_path, "collated.tsv.gz")
    logfile_path = os.path.join(output_path, "collated.log")
    if os.path.exists(outpath) and not overwrite:
        click.secho(f"Skipping. {outpath} exists. Use overwrite to force.",
                    fg='green')
        sys.exit(0)

    stat_fns = exec_command(f'find {base_path} -name'
                            ' "*performance.json*"')
    stat_fns = stat_fns.splitlines()
    if not stat_fns:
        msg = f'No files matching "*performance.json*" in {base_path}'
        click.secho(msg, fg='red')
        return

    LOGGER.log_file_path = logfile_path

    records = []
    keys = set()
    # exclude_paths is a comma separated string of path fragments to skip
    exclude_paths = [] if exclude_paths is None else exclude_paths.split(',')
    num_skipped = 0
    for fn in tqdm(stat_fns, ncols=80):
        if skip_path(exclude_paths, fn):
            num_skipped += 1
            LOGGER.log_message(fn, label="SKIPPED FILE")
            continue

        LOGGER.input_file(fn)
        data = load_json(fn)
        labels = data['classification_report']['labels']
        fscores = data['classification_report']['f-score']
        row = {
            "stat_path": fn,
            "classifier_path": data["classifier_path"],
            "auc": data["auc"],
            "algorithm": data["classifier_label"],
            "mean_precision": data["mean_precision"],
            f"fscore({labels[0]})": fscores[0],
            f"fscore({labels[1]})": fscores[1],
            "balanced_accuracy": data["balanced_accuracy"],
        }
        row.update(data["feature_params"])
        keys.update(row.keys())
        records.append(row)

    # records can differ in their feature params, so missing columns are
    # filled with None
    columns = sorted(keys)
    rows = [[r.get(c, None) for c in columns] for r in records]
    table = LoadTable(header=columns, rows=rows)
    table = table.sorted(reverse="auc")
    table = table.with_new_column(
        "name",
        lambda x: model_name_from_features(*x),
        columns=["flank_size", "feature_dim", "usegc", "proximal"])
    table = table.with_new_column("size", sample_size_from_path,
                                  columns="classifier_path")
    table.write(outpath)
    LOGGER.output_file(outpath)

    # make summary statistics via grouping by factors
    factors = ["algorithm", "name", "flank_size", "feature_dim",
               "proximal", "usegc", "size"]
    summary = summary_stat_table(table, factors=factors)
    outpath = os.path.join(output_path, "summary_statistics.tsv.gz")
    summary.write(outpath)
    LOGGER.output_file(outpath)

    if num_skipped:
        click.secho("Skipped %d files that matched exclude_paths" %
                    num_skipped, fg='red')
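
# a minimal usage sketch, assuming collate() is callable as a plain function
# (in the package it is probably registered as a click sub-command); the
# paths and exclude fragments below are hypothetical
if __name__ == '__main__':
    collate(base_path='results/classifiers',
            output_path='results/collated',
            exclude_paths='tmp,scratch',
            overwrite=True)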