# Imports assumed by the run scripts excerpted below (standard library plus
# numpy/scipy); the HotNet-internal modules referenced as hn, hnio, hnheat,
# ft, p, stats, and viz are imported by the original scripts.
import json
import os
import shutil
import sys

import numpy as np
import scipy.io


def significance_wrapper(args):
    # Takes a single tuple argument so it can be dispatched via multiprocessing
    # map functions; Python 3 no longer supports tuple unpacking in the
    # parameter list itself, so unpack explicitly here.
    infmat, index2gene, heat_permutation, delta, sizes = args
    M, index2gene = hn.induce_infmat(infmat, index2gene, sorted(heat_permutation.keys()))
    h = hn.heat_vec(heat_permutation, index2gene)
    sim_mat = hn.similarity_matrix(M, h)
    G = hn.weighted_graph(sim_mat, index2gene, delta)
    return num_components_min_size(G, sizes)
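# Usage sketch (illustrative, not part of the original scripts): because
# significance_wrapper takes one tuple, the per-permutation work can be fanned
# out with multiprocessing.Pool.map. `heat_permutations` is assumed to be a
# list of gene -> score dicts; the helper name below is hypothetical.
from multiprocessing import Pool

def count_ccs_per_permutation(infmat, index2gene, heat_permutations, delta, sizes, num_cores=None):
    # Build one argument tuple per permutation, matching the unpacking above.
    work = [(infmat, index2gene, perm, delta, sizes) for perm in heat_permutations]
    pool = Pool(num_cores)
    try:
        return pool.map(significance_wrapper, work)
    finally:
        pool.close()
        pool.join()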
def run(args):
    infmat = scipy.io.loadmat(args.infmat_file)[args.infmat_name]
    infmat_index = hnio.load_index(args.infmat_index_file)
    heat, heat_params = hnio.load_heat_json(args.heat_file)

    if args.perm_type == "heat":
        addtl_genes = hnio.load_genes(args.permutation_genes_file) if args.permutation_genes_file else None
        deltas = get_deltas_for_heat(infmat, infmat_index, heat, addtl_genes,
                                     args.num_permutations, args.parallel)
    elif args.perm_type == "mutations":
        deltas = get_deltas_for_mutations(args, infmat, infmat_index, heat_params)
    else:
        raise ValueError("Invalid permutation type: %s" % args.perm_type)

    # find the multiple of the median delta such that the size of the largest CC
    # in the real data is <= MAX_CC_SIZE
    medianDelta = np.median(deltas[MIN_CC_SIZE])

    M, gene_index = hn.induce_infmat(infmat, infmat_index, sorted(heat.keys()))
    h = hn.heat_vec(heat, gene_index)
    sim = hn.similarity_matrix(M, h)

    for i in range(1, 11):
        G = hn.weighted_graph(sim, gene_index, i * medianDelta)
        max_cc_size = max(len(cc) for cc in hn.connected_components(G))
        if max_cc_size <= MAX_CC_SIZE:
            break

    # and recommend running HotNet with that multiple and the next 4 multiples
    recommended_deltas = [m * medianDelta for m in range(i, i + 5)]

    output_file = open(args.output_file, 'w') if args.output_file else sys.stdout
    json.dump({"parameters": vars(args), "heat_parameters": heat_params,
               "recommended_deltas": recommended_deltas}, output_file, indent=4)
    if args.output_file:
        output_file.close()
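# Worked example (illustrative values, not from the original scripts): with a
# median permuted-data delta of 0.5, and the largest real-data component first
# dropping to <= MAX_CC_SIZE at multiple i = 3, the recommendation is that
# multiple and the next four.
medianDelta, i = 0.5, 3
recommended_deltas = [m * medianDelta for m in range(i, i + 5)]
print(recommended_deltas)  # [1.5, 2.0, 2.5, 3.0, 3.5]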
def run(args):
    # create output directory if it doesn't exist; warn if output files already exist
    if not os.path.exists(args.output_directory):
        os.makedirs(args.output_directory)
    dir_contents = os.listdir(args.output_directory)
    if JSON_OUTPUT in dir_contents or COMPONENTS_TSV in dir_contents or SIGNIFICANCE_TSV in dir_contents:
        print("WARNING: Output directory already contains HotNet results file(s), which will be "
              "overwritten. (Ctrl-c to cancel).")

    # load data
    infmat = scipy.io.loadmat(args.infmat_file)[args.infmat_name]
    infmat_index = hnio.load_index(args.infmat_index_file)
    heat, heat_params = hnio.load_heat_json(args.heat_file)

    # compute similarity matrix and extract connected components
    M, gene_index = hn.induce_infmat(infmat, infmat_index, sorted(heat.keys()), quiet=False)
    h = hn.heat_vec(heat, gene_index)
    sim = hn.similarity_matrix(M, h)
    G = hn.weighted_graph(sim, gene_index, args.delta)
    ccs = hn.connected_components(G, args.min_cc_size)

    # calculate significance
    if args.permutation_type != "none":
        if args.permutation_type == "heat":
            sizes2stats = heat_permutation_significance(args, heat, infmat, infmat_index, G)
        elif args.permutation_type == "mutations":
            if heat_params["heat_fn"] != "load_mutation_heat":
                raise RuntimeError("Heat scores must be based on mutation data to perform "
                                   "significance testing based on mutation data permutation.")
            sizes2stats = mutation_permutation_significance(args, infmat, infmat_index, G, heat_params)
        else:
            raise ValueError("Unrecognized permutation type %s" % args.permutation_type)

    # sort ccs list such that genes within components are sorted alphanumerically, and components
    # are sorted first by length, then alphanumerically by name of the first gene in the component
    ccs = [sorted(cc) for cc in ccs]
    ccs.sort(key=lambda comp: comp[0])
    ccs.sort(key=len, reverse=True)

    # write output
    output_dict = {"parameters": vars(args), "heat_parameters": heat_params,
                   "sizes": hn.component_sizes(ccs), "components": ccs}
    if args.permutation_type != "none":
        output_dict["statistics"] = sizes2stats
        hnio.write_significance_as_tsv(os.path.join(os.path.abspath(args.output_directory),
                                                    SIGNIFICANCE_TSV), sizes2stats)
    json_out = open(os.path.join(os.path.abspath(args.output_directory), JSON_OUTPUT), 'w')
    json.dump(output_dict, json_out, indent=4)
    json_out.close()
    hnio.write_components_as_tsv(os.path.join(os.path.abspath(args.output_directory), COMPONENTS_TSV), ccs)
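# Worked example (illustrative gene names): the two stable sort passes above
# first order components alphabetically by their lead gene, then by size
# (descending); stability preserves the alphabetical order within each size.
components = [["TP53", "MDM2"], ["KRAS"], ["BRAF", "ARAF"]]
components = [sorted(cc) for cc in components]  # [['MDM2','TP53'], ['KRAS'], ['ARAF','BRAF']]
components.sort(key=lambda comp: comp[0])       # [['ARAF','BRAF'], ['KRAS'], ['MDM2','TP53']]
components.sort(key=len, reverse=True)          # [['ARAF','BRAF'], ['MDM2','TP53'], ['KRAS']]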
def run(args):
    # create output directory if it doesn't exist; warn if it exists and is not empty
    if not os.path.exists(args.output_directory):
        os.makedirs(args.output_directory)
    if len(os.listdir(args.output_directory)) > 0:
        print("WARNING: Output directory is not empty. Any conflicting files will be overwritten. "
              "(Ctrl-c to cancel).")

    infmat = scipy.io.loadmat(args.infmat_file)[INFMAT_NAME]
    infmat_index = hnio.load_index(args.infmat_index_file)
    heat = hnio.load_heat_tsv(args.heat_file)

    # filter out genes with heat score less than min_heat_score
    heat, addtl_genes, args.min_heat_score = hnheat.filter_heat(heat, args.min_heat_score)

    # find delta that maximizes # CCs of size >= MIN_CC_SIZE for each permuted data set
    deltas = ft.get_deltas_for_heat(infmat, infmat_index, heat, addtl_genes,
                                    args.num_permutations, args.parallel)

    # find the multiple of the median delta such that the size of the largest CC
    # in the real data is <= MAX_CC_SIZE
    medianDelta = np.median(deltas[MIN_CC_SIZE])

    M, gene_index = hn.induce_infmat(infmat, infmat_index, sorted(heat.keys()), quiet=False)
    h = hn.heat_vec(heat, gene_index)
    sim = hn.similarity_matrix(M, h)

    for i in range(1, 11):
        G = hn.weighted_graph(sim, gene_index, i * medianDelta)
        max_cc_size = max(len(cc) for cc in hn.connected_components(G))
        if max_cc_size <= MAX_CC_SIZE:
            break

    # load interaction network edges and determine location of static HTML files for visualization
    edges = hnio.load_ppi_edges(args.edge_file) if args.edge_file else None
    index_file = '%s/viz_files/%s' % (os.path.realpath(__file__).rsplit('/', 1)[0], VIZ_INDEX)
    subnetworks_file = '%s/viz_files/%s' % (os.path.realpath(__file__).rsplit('/', 1)[0], VIZ_SUBNETWORKS)
    gene2index = {gene: index for index, gene in infmat_index.items()}

    # and run HotNet with that multiple and the next 4 multiples
    run_deltas = [m * medianDelta for m in range(i, i + 5)]
    for delta in run_deltas:
        # create output directory
        delta_out_dir = args.output_directory + "/delta_" + str(delta)
        if not os.path.isdir(delta_out_dir):
            os.mkdir(delta_out_dir)

        # find connected components
        G = hn.weighted_graph(sim, gene_index, delta)
        ccs = hn.connected_components(G, args.min_cc_size)

        # calculate significance (using all genes with heat scores)
        print("* Performing permuted heat statistical significance...")
        heat_permutations = p.permute_heat(heat, args.num_permutations, addtl_genes, args.parallel)
        sizes = list(range(2, 11))
        print("\t- Using no. of components >= k (k \\in [%s, %s]) as statistic" % (min(sizes), max(sizes)))
        sizes2counts = stats.calculate_permuted_cc_counts(infmat, infmat_index, heat_permutations,
                                                          delta, sizes, args.parallel)
        real_counts = stats.num_components_min_size(G, sizes)
        size2real_counts = dict(zip(sizes, real_counts))
        sizes2stats = stats.compute_statistics(size2real_counts, sizes2counts, args.num_permutations)

        # sort ccs list such that genes within components are sorted alphanumerically, and components
        # are sorted first by length, then alphanumerically by name of the first gene in the component
        ccs = [sorted(cc) for cc in ccs]
        ccs.sort(key=lambda comp: comp[0])
        ccs.sort(key=len, reverse=True)

        # write output
        heat_dict = {"heat": heat, "parameters": {"heat_file": args.heat_file}}
        heat_out = open(os.path.abspath(delta_out_dir) + "/" + HEAT_JSON, 'w')
        json.dump(heat_dict, heat_out, indent=4)
        heat_out.close()
        args.heat_file = os.path.abspath(delta_out_dir) + "/" + HEAT_JSON
        args.delta = delta
        output_dict = {"parameters": vars(args), "sizes": hn.component_sizes(ccs),
                       "components": ccs, "statistics": sizes2stats}
        hnio.write_significance_as_tsv(os.path.abspath(delta_out_dir) + "/" + SIGNIFICANCE_TSV, sizes2stats)
        json_out = open(os.path.abspath(delta_out_dir) + "/" + JSON_OUTPUT, 'w')
        json.dump(output_dict, json_out, indent=4)
        json_out.close()
        hnio.write_components_as_tsv(os.path.abspath(delta_out_dir) + "/" + COMPONENTS_TSV, ccs)

        # write visualization output if edge file given
        if args.edge_file:
            viz_data = {"delta": delta, "subnetworks": list()}
            for cc in ccs:
                viz_data["subnetworks"].append(viz.get_component_json(cc, heat, edges, gene2index,
                                                                      args.network_name))
            delta_viz_dir = '%s/viz/delta%s' % (args.output_directory, delta)
            if not os.path.isdir(delta_viz_dir):
                os.makedirs(delta_viz_dir)
            viz_out = open('%s/subnetworks.json' % delta_viz_dir, 'w')
            json.dump(viz_data, viz_out, indent=4)
            viz_out.close()
            shutil.copy(subnetworks_file, delta_viz_dir)

    if args.edge_file:
        viz.write_index_file(index_file, '%s/viz/%s' % (args.output_directory, VIZ_INDEX), run_deltas)
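# Shape sketch (an assumption inferred from the code above, not taken from the
# HotNet docs): each per-delta viz/delta<delta>/subnetworks.json written by the
# function above would hold
#     {"delta": <delta>,
#      "subnetworks": [<one object per connected component,
#                       as returned by viz.get_component_json>]}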
def run(args):
    # create output directory if it doesn't exist; warn if it exists and is not empty
    if not os.path.exists(args.output_directory):
        os.makedirs(args.output_directory)
    if len(os.listdir(args.output_directory)) > 0:
        print("WARNING: Output directory is not empty. Any conflicting files will be overwritten. "
              "(Ctrl-c to cancel).")

    infmat = scipy.io.loadmat(args.infmat_file)[INFMAT_NAME]
    infmat_index = hnio.load_index(args.infmat_index_file)
    heat = hnio.load_heat_tsv(args.heat_file)

    # filter out genes with heat score less than min_heat_score
    heat, addtl_genes, args.min_heat_score = hnheat.filter_heat(heat, args.min_heat_score)

    # find delta that maximizes # CCs of size >= MIN_CC_SIZE for each permuted data set
    deltas = ft.get_deltas_for_heat(infmat, infmat_index, heat, addtl_genes,
                                    args.num_permutations, args.parallel)

    # find the multiple of the median delta such that the size of the largest CC
    # in the real data is <= MAX_CC_SIZE
    medianDelta = np.median(deltas[MIN_CC_SIZE])

    M, gene_index = hn.induce_infmat(infmat, infmat_index, sorted(heat.keys()), quiet=False)
    h = hn.heat_vec(heat, gene_index)
    sim = hn.similarity_matrix(M, h)

    for i in range(1, 11):
        G = hn.weighted_graph(sim, gene_index, i * medianDelta)
        max_cc_size = max(len(cc) for cc in hn.connected_components(G))
        if max_cc_size <= MAX_CC_SIZE:
            break

    # and run HotNet with that multiple and the next 4 multiples
    run_deltas = [m * medianDelta for m in range(i, i + 5)]
    for delta in run_deltas:
        # create output directory
        delta_out_dir = args.output_directory + "/delta_" + str(delta)
        if not os.path.isdir(delta_out_dir):
            os.mkdir(delta_out_dir)

        # find connected components
        G = hn.weighted_graph(sim, gene_index, delta)
        ccs = hn.connected_components(G, args.min_cc_size)

        # calculate significance (using all genes with heat scores)
        print("* Performing permuted heat statistical significance...")
        heat_permutations = p.permute_heat(heat, args.num_permutations, addtl_genes, args.parallel)
        sizes = list(range(2, 11))
        print("\t- Using no. of components >= k (k \\in [%s, %s]) as statistic" % (min(sizes), max(sizes)))
        sizes2counts = stats.calculate_permuted_cc_counts(infmat, infmat_index, heat_permutations,
                                                          delta, sizes, args.parallel)
        real_counts = stats.num_components_min_size(G, sizes)
        size2real_counts = dict(zip(sizes, real_counts))
        sizes2stats = stats.compute_statistics(size2real_counts, sizes2counts, args.num_permutations)

        # sort ccs list such that genes within components are sorted alphanumerically, and components
        # are sorted first by length, then alphanumerically by name of the first gene in the component
        ccs = [sorted(cc) for cc in ccs]
        ccs.sort(key=lambda comp: comp[0])
        ccs.sort(key=len, reverse=True)

        # write output
        output_dict = {"parameters": vars(args), "sizes": hn.component_sizes(ccs),
                       "components": ccs, "statistics": sizes2stats}
        hnio.write_significance_as_tsv(os.path.abspath(delta_out_dir) + "/" + SIGNIFICANCE_TSV, sizes2stats)
        json_out = open(os.path.abspath(delta_out_dir) + "/" + JSON_OUTPUT, 'w')
        json.dump(output_dict, json_out, indent=4)
        json_out.close()
        hnio.write_components_as_tsv(os.path.abspath(delta_out_dir) + "/" + COMPONENTS_TSV, ccs)
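# Entry-point sketch (illustrative, not the original script's parser): a
# minimal argparse setup covering only the attributes this run() reads; the
# flag names mirror those attributes, but the defaults here are assumptions.
import argparse

def build_parser():
    parser = argparse.ArgumentParser()
    parser.add_argument("--infmat_file", required=True)
    parser.add_argument("--infmat_index_file", required=True)
    parser.add_argument("--heat_file", required=True)
    parser.add_argument("--output_directory", required=True)
    parser.add_argument("--min_heat_score", type=float, default=0.0)
    parser.add_argument("--min_cc_size", type=int, default=2)
    parser.add_argument("--num_permutations", type=int, default=100)
    parser.add_argument("--parallel", action="store_true")
    return parser

if __name__ == "__main__":
    run(build_parser().parse_args())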