def run_bootstrap_net_correlation(run_parameters):
    """ Perform gene prioritization using bootstrap sampling and network smoothing.

    Args:
        run_parameters: parameter set dictionary.
    """
    run_parameters["results_tmp_directory"] = kn.create_dir(run_parameters["results_directory"], 'tmp')
    gg_network_name_full_path = run_parameters['gg_network_name_full_path']
    network_mat, unique_gene_names = kn.get_sparse_network_matrix(gg_network_name_full_path)

    network_mat = normalize(network_mat, norm="l1", axis=0)

    phenotype_df = kn.get_spreadsheet_df(run_parameters["phenotype_name_full_path"])
    spreadsheet_df = kn.get_spreadsheet_df(run_parameters["spreadsheet_name_full_path"])
    spreadsheet_genes_as_input = spreadsheet_df.index.values
    phenotype_df = phenotype_df.T

    spreadsheet_df = kn.update_spreadsheet_df(spreadsheet_df, unique_gene_names)
    spreadsheet_df = zscore_dataframe(spreadsheet_df)
    sample_smooth, iterations = kn.smooth_matrix_with_rwr(spreadsheet_df.values, network_mat.T, run_parameters)
    spreadsheet_df = pd.DataFrame(sample_smooth, index=spreadsheet_df.index, columns=spreadsheet_df.columns)

    baseline_array = np.ones(network_mat.shape[0]) / network_mat.shape[0]
    baseline_array = kn.smooth_matrix_with_rwr(baseline_array, network_mat, run_parameters)[0]

    number_of_jobs = len(phenotype_df.index)
    jobs_id = range(0, number_of_jobs)
    zipped_arguments = dstutil.zip_parameters(run_parameters, spreadsheet_df, phenotype_df, network_mat,
                                              spreadsheet_genes_as_input, baseline_array, jobs_id)
    dstutil.parallelize_processes_locally(run_bootstrap_net_correlation_worker, zipped_arguments, number_of_jobs)

    write_phenotype_data_all(run_parameters)
    kn.remove_dir(run_parameters["results_tmp_directory"])
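
# The smoothing step above relies on kn.smooth_matrix_with_rwr. As a reference, here is a
# minimal, self-contained sketch of the random-walk-with-restart iteration such a routine
# typically performs; the restart_prob, tol, and max_iter parameter names are illustrative
# assumptions, not the knpackage API.
def _rwr_smooth_sketch(spreadsheet_mat, network_mat, restart_prob=0.5, tol=1e-4, max_iter=100):
    """ Illustrative RWR smoothing: F <- (1 - r) * W @ F + r * F0, iterated to convergence.

    Assumes network_mat is column-normalized, as done with normalize(..., norm="l1", axis=0).
    Returns the smoothed matrix and the number of iterations used.
    """
    restart_mat = spreadsheet_mat
    smooth_mat = spreadsheet_mat
    for step in range(max_iter):
        new_mat = (1 - restart_prob) * network_mat.dot(smooth_mat) + restart_prob * restart_mat
        if np.linalg.norm(new_mat - smooth_mat) < tol:
            return new_mat, step
        smooth_mat = new_mat
    return smooth_mat, max_iter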
def run_cc_net_similarity(run_parameters):
    """ Wrapper: call sequence to perform signature analysis with random walk smoothing
        and bootstrapped similarity, and save results.

    Args:
        run_parameters: parameter set dictionary.
    """
    tmp_dir = 'tmp_cc_similarity_'
    run_parameters = update_tmp_directory(run_parameters, tmp_dir)

    expression_name = run_parameters["spreadsheet_name_full_path"]
    signature_name = run_parameters["signature_name_full_path"]
    gg_network_name = run_parameters['gg_network_name_full_path']
    similarity_measure = run_parameters["similarity_measure"]
    number_of_bootstraps = run_parameters['number_of_bootstraps']
    processing_method = run_parameters['processing_method']

    expression_df = kn.get_spreadsheet_df(expression_name)
    signature_df = kn.get_spreadsheet_df(signature_name)

    samples_names = expression_df.columns
    signatures_names = signature_df.columns
    signatures_names = [i.split('.')[0] for i in signatures_names]
    signature_df.columns = signatures_names

    network_mat, unique_gene_names = kn.get_sparse_network_matrix(gg_network_name)
    # network_mat = kn.normalize_sparse_mat_by_diagonal(network_mat)

    expression_df = kn.update_spreadsheet_df(expression_df, unique_gene_names)
    signature_df = kn.update_spreadsheet_df(signature_df, unique_gene_names)

    expression_mat = expression_df.values
    signature_mat = signature_df.values

    expression_mat, iterations = kn.smooth_matrix_with_rwr(expression_mat, network_mat, run_parameters)
    signature_mat, iterations = kn.smooth_matrix_with_rwr(signature_mat, network_mat, run_parameters)

    expression_df.iloc[:] = expression_mat
    signature_df.iloc[:] = signature_mat

    if processing_method == 'serial':
        for sample in range(0, number_of_bootstraps):
            run_cc_similarity_signature_worker(expression_df, signature_df, run_parameters, sample)
    elif processing_method == 'parallel':
        find_and_save_cc_similarity_parallel(expression_df, signature_df, run_parameters, number_of_bootstraps)
    else:
        raise ValueError('processing_method contains bad value.')

    # consensus_df = form_consensus_df(run_parameters, expression_df, signature_df)
    similarity_df = assemble_similarity_df(expression_df, signature_df, run_parameters)
    similarity_df = pd.DataFrame(similarity_df.values, index=samples_names, columns=signatures_names)

    save_final_samples_signature(similarity_df, run_parameters)
    save_best_match_signature(similarity_df, run_parameters)

    kn.remove_dir(run_parameters["tmp_directory"])
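
# Each bootstrap iteration above delegates to run_cc_similarity_signature_worker. A minimal
# sketch of the column-bootstrap idea such a worker is built on; the cols_sampling_fraction
# parameter name is an assumption for illustration, not the actual knpackage API.
def _bootstrap_columns_sketch(expression_df, cols_sampling_fraction=0.8):
    """ Illustrative bootstrap: keep a random fraction of the sample columns.

    Returns the sampled dataframe and the chosen column positions, so a worker could
    compute similarity on the subsample and later aggregate across bootstraps.
    """
    n_cols = expression_df.shape[1]
    n_keep = int(np.round(n_cols * cols_sampling_fraction))
    keep = np.random.permutation(n_cols)[:n_keep]
    return expression_df.iloc[:, keep], keep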
def run_net_nmf(run_parameters):
    """ Wrapper: call sequence to perform network-based stratification and write results.

    Args:
        run_parameters: parameter set dictionary.
    """
    np.random.seed(0)

    number_of_clusters = run_parameters['number_of_clusters']
    gg_network_name_full_path = run_parameters['gg_network_name_full_path']
    spreadsheet_name_full_path = run_parameters['spreadsheet_name_full_path']

    network_mat, unique_gene_names = kn.get_sparse_network_matrix(gg_network_name_full_path)
    network_mat = kn.normalize_sparse_mat_by_diagonal(network_mat)
    lap_diag, lap_pos = kn.form_network_laplacian_matrix(network_mat)

    spreadsheet_df = kn.get_spreadsheet_df(spreadsheet_name_full_path)
    spreadsheet_df = kn.update_spreadsheet_df(spreadsheet_df, unique_gene_names)

    sample_names = spreadsheet_df.columns
    spreadsheet_mat = spreadsheet_df.values

    spreadsheet_mat, iterations = kn.smooth_matrix_with_rwr(spreadsheet_mat, network_mat, run_parameters)
    spreadsheet_mat = kn.get_quantile_norm_matrix(spreadsheet_mat)

    h_mat = kn.perform_net_nmf(spreadsheet_mat, lap_pos, lap_diag, run_parameters)

    linkage_matrix = np.zeros((spreadsheet_mat.shape[1], spreadsheet_mat.shape[1]))
    sample_perm = np.arange(0, spreadsheet_mat.shape[1])
    linkage_matrix = kn.update_linkage_matrix(h_mat, sample_perm, linkage_matrix)
    labels = kn.perform_kmeans(linkage_matrix, number_of_clusters)

    distance_matrix = pairwise_distances(h_mat.T, n_jobs=-1)  # [n_samples, n_features]; use all available cores

    save_consensus_clustering(linkage_matrix, sample_names, labels, run_parameters)
    calculate_and_save_silhouette_scores(distance_matrix, sample_names, labels, run_parameters)
    save_final_samples_clustering(sample_names, labels, run_parameters)
    save_spreadsheet_and_variance_heatmap(spreadsheet_df, labels, run_parameters)
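
# kn.update_linkage_matrix above turns the net-NMF factor h_mat into a sample-by-sample
# co-cluster indicator. A minimal sketch of that operation, assuming h_mat is
# clusters x samples and each sample belongs to its largest factor; this is an
# illustration of the idea, not the knpackage implementation.
def _linkage_from_h_sketch(h_mat):
    """ Illustrative co-cluster indicator: entry (i, j) is 1 when samples i and j share
        the same argmax cluster in h_mat, else 0. """
    labels = np.argmax(h_mat, axis=0)
    return (labels[:, None] == labels[None, :]).astype(float)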
def run_net_similarity(run_parameters):
    """ Run random walk with restart first to smooth the expression and signature data,
        then perform similarity analysis and save the similarity matrix.

    Args:
        run_parameters: parameter set dictionary.
    """
    expression_name = run_parameters["spreadsheet_name_full_path"]
    signature_name = run_parameters["signature_name_full_path"]
    gg_network_name = run_parameters['gg_network_name_full_path']
    similarity_measure = run_parameters["similarity_measure"]

    expression_df = kn.get_spreadsheet_df(expression_name)
    signature_df = kn.get_spreadsheet_df(signature_name)

    samples_names = expression_df.columns
    signatures_names = signature_df.columns
    signatures_names = [i.split('.')[0] for i in signatures_names]
    signature_df.columns = signatures_names

    network_mat, unique_gene_names = kn.get_sparse_network_matrix(gg_network_name)
    # network_mat = kn.normalize_sparse_mat_by_diagonal(network_mat)

    expression_df = kn.update_spreadsheet_df(expression_df, unique_gene_names)
    signature_df = kn.update_spreadsheet_df(signature_df, unique_gene_names)

    expression_mat = expression_df.values
    signature_mat = signature_df.values

    expression_mat, iterations = kn.smooth_matrix_with_rwr(expression_mat, network_mat, run_parameters)
    signature_mat, iterations = kn.smooth_matrix_with_rwr(signature_mat, network_mat, run_parameters)

    expression_df.iloc[:] = expression_mat
    signature_df.iloc[:] = signature_mat

    similarity_mat = generate_similarity_mat(expression_df, signature_df, similarity_measure)
    # similarity_mat = map_similarity_range(similarity_mat, 0)
    similarity_df = pd.DataFrame(similarity_mat, index=samples_names, columns=signatures_names)

    save_final_samples_signature(similarity_df, run_parameters)
    save_best_match_signature(similarity_df, run_parameters)
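
# generate_similarity_mat above computes sample-vs-signature similarity according to
# similarity_measure. A minimal sketch for a cosine measure (other measures, such as a
# rank-based one, follow the same column-by-column pattern); this is an illustration,
# not the actual implementation.
def _cosine_similarity_sketch(expression_df, signature_df):
    """ Illustrative cosine similarity between expression columns and signature columns.

    Assumes both dataframes share the same gene index ordering, as arranged by
    kn.update_spreadsheet_df, and that no column is all zeros.
    """
    x = expression_df.values
    y = signature_df.values
    x_norm = x / np.linalg.norm(x, axis=0, keepdims=True)
    y_norm = y / np.linalg.norm(y, axis=0, keepdims=True)
    return x_norm.T.dot(y_norm)  # [n_samples, n_signatures]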
def run_net_correlation(run_parameters):
    """ Perform gene prioritization with network smoothing.

    Args:
        run_parameters: parameter set dictionary.
    """
    max_cpu = run_parameters["max_cpu"]
    run_parameters["results_tmp_directory"] = kn.create_dir(run_parameters["results_directory"], 'tmp')
    gg_network_name_full_path = run_parameters['gg_network_name_full_path']
    network_mat, unique_gene_names = kn.get_sparse_network_matrix(gg_network_name_full_path)

    network_mat = normalize(network_mat, norm="l1", axis=0)

    phenotype_df = kn.get_spreadsheet_df(run_parameters["phenotype_name_full_path"])
    spreadsheet_df = kn.get_spreadsheet_df(run_parameters["spreadsheet_name_full_path"])
    spreadsheet_genes_as_input = spreadsheet_df.index.values
    phenotype_df = phenotype_df.T

    spreadsheet_df = kn.update_spreadsheet_df(spreadsheet_df, unique_gene_names)
    spreadsheet_df = zscore_dataframe(spreadsheet_df)

    sample_smooth, iterations = kn.smooth_matrix_with_rwr(spreadsheet_df.values, network_mat.T, run_parameters)
    spreadsheet_df = pd.DataFrame(sample_smooth, index=spreadsheet_df.index, columns=spreadsheet_df.columns)

    baseline_array = np.ones(network_mat.shape[0]) / network_mat.shape[0]
    baseline_array = kn.smooth_matrix_with_rwr(baseline_array, network_mat, run_parameters)[0]

    #-----------------------------------------------------------------------------------------
    # Partition the phenotypes into batches of at most max_cpu jobs each; a single pass of
    # this loop covers the case where all phenotypes fit in one batch.
    #-----------------------------------------------------------------------------------------
    len_phenotype = len(phenotype_df.index)
    array_of_jobs = range(0, len_phenotype)

    for i in range(0, len_phenotype, max_cpu):
        jobs_id = array_of_jobs[i:i + max_cpu]
        number_of_jobs = len(jobs_id)

        zipped_arguments = dstutil.zip_parameters(run_parameters, spreadsheet_df, phenotype_df, network_mat,
                                                  spreadsheet_genes_as_input, baseline_array, jobs_id)
        dstutil.parallelize_processes_locally(run_net_correlation_worker, zipped_arguments, number_of_jobs)

        write_phenotype_data_all(run_parameters)

    kn.remove_dir(run_parameters["results_tmp_directory"])
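
# Each run_net_correlation_worker call above scores the smoothed, z-scored gene rows
# against one phenotype. A minimal sketch of that per-phenotype scoring; the worker's
# real signature, baseline handling, and output writing live elsewhere, so this is
# illustrative only.
def _correlate_genes_with_phenotype_sketch(spreadsheet_mat, phenotype_array):
    """ Illustrative Pearson correlation of each gene row (genes x samples) with one
        phenotype vector of length n_samples; returns one score per gene. """
    x = spreadsheet_mat - spreadsheet_mat.mean(axis=1, keepdims=True)
    y = phenotype_array - phenotype_array.mean()
    denom = np.sqrt((x ** 2).sum(axis=1) * (y ** 2).sum())
    return x.dot(y) / denom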
def run_cc_net_nmf(run_parameters):
    """ Wrapper: call sequence to perform network-based stratification with consensus
        clustering and write results.

    Args:
        run_parameters: parameter set dictionary.
    """
    tmp_dir = 'tmp_cc_net_nmf'
    run_parameters = update_tmp_directory(run_parameters, tmp_dir)

    processing_method = run_parameters['processing_method']
    number_of_clusters = run_parameters['number_of_clusters']
    number_of_bootstraps = run_parameters['number_of_bootstraps']
    gg_network_name_full_path = run_parameters['gg_network_name_full_path']
    spreadsheet_name_full_path = run_parameters['spreadsheet_name_full_path']

    network_mat, unique_gene_names = kn.get_sparse_network_matrix(gg_network_name_full_path)
    network_mat = kn.normalize_sparse_mat_by_diagonal(network_mat)
    lap_diag, lap_pos = kn.form_network_laplacian_matrix(network_mat)

    spreadsheet_df = kn.get_spreadsheet_df(spreadsheet_name_full_path)
    spreadsheet_df = kn.update_spreadsheet_df(spreadsheet_df, unique_gene_names)

    spreadsheet_mat = spreadsheet_df.values
    number_of_samples = spreadsheet_mat.shape[1]
    sample_names = spreadsheet_df.columns

    if processing_method == 'serial':
        for sample in range(0, number_of_bootstraps):
            run_cc_net_nmf_clusters_worker(network_mat, spreadsheet_mat, lap_diag, lap_pos,
                                           run_parameters, sample)
    elif processing_method == 'parallel':
        find_and_save_cc_net_nmf_clusters_parallel(network_mat, spreadsheet_mat, lap_diag, lap_pos,
                                                   run_parameters, number_of_bootstraps)
    elif processing_method == 'distribute':
        func_args = [network_mat, spreadsheet_mat, lap_diag, lap_pos, run_parameters]
        dependency_list = [run_cc_net_nmf_clusters_worker,
                           save_a_clustering_to_tmp,
                           dstutil.determine_parallelism_locally]
        cluster_ip_address = run_parameters['cluster_ip_address']
        dstutil.execute_distribute_computing_job(cluster_ip_address,
                                                 number_of_bootstraps,
                                                 func_args,
                                                 find_and_save_cc_net_nmf_clusters_parallel,
                                                 dependency_list)
    else:
        raise ValueError('processing_method contains bad value.')

    consensus_matrix = form_consensus_matrix(run_parameters, number_of_samples)
    distance_matrix = pairwise_distances(consensus_matrix, n_jobs=-1)  # [n_samples, n_samples]; use all available cores

    labels = kn.perform_kmeans(consensus_matrix, number_of_clusters)

    save_consensus_clustering(consensus_matrix, sample_names, labels, run_parameters)
    calculate_and_save_silhouette_scores(distance_matrix, sample_names, labels, run_parameters)
    save_final_samples_clustering(sample_names, labels, run_parameters)
    save_spreadsheet_and_variance_heatmap(spreadsheet_df, labels, run_parameters, network_mat)

    kn.remove_dir(run_parameters["tmp_directory"])
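
# form_consensus_matrix above aggregates the bootstrapped clusterings saved under the tmp
# directory. A minimal in-memory sketch of the consensus idea, averaging per-bootstrap
# co-cluster indicator matrices; the real code also reads tmp files and can normalize by
# how often each sample pair was co-sampled, which is omitted here.
def _consensus_from_indicators_sketch(indicator_matrices):
    """ Illustrative consensus: mean of sample-by-sample indicator matrices, so entry
        (i, j) is the fraction of bootstraps in which samples i and j co-clustered. """
    return np.mean(np.stack(indicator_matrices, axis=0), axis=0)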