import numpy
import qvalue

def generate_DF(ref):
    ref_arr = numpy.asarray(ref)
    # q-values for the p-value column (column 1); the [1] keeps it 2-D for the append below
    qval = qvalue.estimate(ref_arr[:, [1]])
    q_val = numpy.asarray(qval)
    df_all = numpy.append(ref_arr, q_val, 1)
    insig_ref = df_all[df_all[:, 2] > 0.2]  # rows whose q-value exceeds 0.2
    print df_all
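# For reference, a minimal standalone sketch of the qvalue library these
# snippets rely on (https://github.com/nfusi/qvalue, linked in reorder()
# below). The p-values here are illustrative; per the border-value handling
# in performTest() below, the library is assumed to reject p-values of
# exactly 0 or 1.
import numpy
import qvalue

pv = numpy.random.uniform(1e-12, 1.0 - 1e-12, size=10000)  # illustrative p-values in (0, 1)
qv = qvalue.estimate(pv)  # one q-value per input p-value
print(qv[:5])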
def adjust_p_values(self):
    """ Calculates q-values for all p-values. """
    # Fetch all p-values
    p_values = list()
    for chrom in self.bin_dict.keys():
        for bin in self.bin_dict[chrom].keys():
            p_values.append(self.bin_dict[chrom][bin][2])

    # Convert to numpy array and calculate q-values
    p_array = numpy.array(p_values)
    q_array = qvalue.estimate(p_array)

    # Build p- to q-value dict
    p_to_q_value_dict = dict()
    for i in range(len(q_array)):
        p_to_q_value_dict[p_array[i]] = q_array[i]

    # Append the q-value at the end of the list stored for each bin
    for chrom in self.bin_dict.keys():
        for bin in self.bin_dict[chrom].keys():
            pval = self.bin_dict[chrom][bin][2]
            self.bin_dict[chrom][bin].append(p_to_q_value_dict[pval])
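# A subtlety in adjust_p_values() above: the p-to-q dict keys on the raw
# p-value, so tied p-values collapse into one entry (harmless here, since
# equal p-values receive equal q-values, but fragile). A position-based
# variant, sketched under the same qvalue.estimate assumption:
import numpy
import qvalue

def qvalues_by_position(p_values):
    # Align q-values with the input order by index instead of keying on the
    # p-value itself, so ties need no special handling.
    q_array = qvalue.estimate(numpy.array(p_values))
    return dict(enumerate(q_array))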
def find_incoming_edges(self, t):
    """
    Find incoming edges that build a v-structure a-->t<--b with
        a) corr(a,t)
        b) corr(b,t)
        c) ind(a,b)
        d) corr(a,b|t)

    input:
        t : index of the gene t
    """
    # incoming edges are associated with the gene of interest...
    pv_genes = self.genecorr_reader.getRows([t])[0]
    idx_assoc = qvalue.estimate(pv_genes) < self.thresh_corr
    idx_assoc[t] = False
    if not idx_assoc.any():
        return None, None

    # ...independent of each other...
    _idx_assoc = np.nonzero(idx_assoc)[0]
    pv_genes = self.genecorr_reader.getRows(_idx_assoc)[:, idx_assoc]
    idx_vstruct = np.nonzero(idx_assoc)[0]
    vstruct = pv_genes > self.thresh_ind
    idx_ind = vstruct.any(axis=1)
    idx_vstruct = idx_vstruct[idx_ind]
    vstruct = vstruct[idx_ind][:, idx_ind]
    if not idx_vstruct.any():
        return None, None

    # ...and becoming dependent once we condition on the gene under observation
    Yv = self.phenoreader.getRows(idx_vstruct)
    Yt = self.phenoreader.getRows([t])[0]
    _, pv_cond = pcor.pcorParallel(Yv, Yt)
    qv_cond = qvalue.estimate(pv_cond)
    vstruct *= qv_cond < self.thresh_corr
    idx_partcorr = vstruct.any(axis=0)
    if not idx_partcorr.any():
        return None, None
    vstruct = vstruct[idx_partcorr][:, idx_partcorr]
    idx_vstruct = idx_vstruct[idx_partcorr]
    return vstruct, idx_vstruct
def association_scan(self):
    print "Association scan... ",
    K = self.kernel_testing(genetics=False, confounders=True)
    pval = testing.interface(self.S_centered, self.Y, K, I=None,
                             model='LMM',
                             parallel=False,  # TODO parallelize
                             file_directory=None, jobs=0)[0]
    print "[DONE]"

    # convert to q-values
    qval = qvalue.estimate(pval)
    return qval, pval
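# A hypothetical consumer of the (qval, pval) pair returned by
# association_scan(); the helper and the 5% FDR cut-off are illustrative,
# not part of the original code.
import numpy

def significant_hits(qval, fdr=0.05):
    # Indices of tests whose q-value passes the chosen FDR threshold.
    return numpy.nonzero(numpy.asarray(qval) < fdr)[0]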
def panama_step(self):
    X = self.get_latent()
    K = self.kernel_testing(genetics=False, confounders=False)
    K = scaleK(K)

    if len(self.candidate_associations) != 0:
        covs = self.S_centered[:, self.candidate_associations].copy()
    else:
        covs = None

    pv = testing.interface(self.S_centered, X[:, :self.Q], K, covs=covs,
                           model="LMM", parallel=False, jobs=0,
                           file_directory=None)[0]  # TODO cleanup

    # Number of tests conducted
    num_tests = X.shape[1] * self.S.shape[1] * self.iteration
    qv = qvalue.estimate(pv, m=num_tests)

    # Set the q-value of the current associations to 1
    qv[self.candidate_associations, :] = 1

    # Greedily construct the addition set by adding the best (lowest q-value)
    # SNP for each factor (if significant)
    new_candidates = []
    for i in xrange(qv.shape[1]):
        i_best = qv[:, i].argmin()
        qv_best = qv[:, i].min()

        # if significant, add it
        if qv_best <= self.FDR_associations:
            new_candidates.append(i_best)
            # and set the corresponding q-value to 1
            qv[i_best, :] = 1

    # Add candidates
    nc_old = len(self.candidate_associations)
    self.candidate_associations.extend(new_candidates)
    nc = len(self.candidate_associations)
    dl = nc - nc_old
    assert len(np.unique(self.candidate_associations)) == len(self.candidate_associations)
    return dl
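# panama_step() passes an explicit test count via m=, so q-values account
# for every test run across iterations, not just the current scan. A minimal
# sketch of that keyword (shapes and counts illustrative), assuming, as the
# code above does, that qvalue.estimate preserves a 2-D input's shape:
import numpy
import qvalue

pv = numpy.random.uniform(1e-12, 1.0 - 1e-12, size=(500, 4))  # SNPs x factors
qv = qvalue.estimate(pv, m=pv.size * 3)  # e.g. the third scan over the same SNPs
print(qv.shape)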
def fix_rows(genes, pvalues):
    pvalues = numpy.array(pvalues)
    _qvalues = qvalue.estimate(pvalues)
    _qvalues = {pvalues[i]: q for i, q in enumerate(_qvalues)}

    F = Utilities.WDBIF
    keys = genes.keys()
    for gene in keys:
        rows = genes[gene]
        input = [(math.fabs(float(r[F.WEIGHT])),
                  float(r[F.GENE_PVALUE]),
                  _qvalues[float(r[F.GENE_PVALUE])],
                  r[F.SNP]) for k, r in rows.iteritems()]
        w = sum(map(lambda x: x[0], input))
        if w == 0:
            logging.info("Cannot handle null weight for %s", gene)
            a_p = None
            a_q = None
        else:
            # |weight|-averaged p- and q-values across the gene's SNPs
            a_p = str(sum(map(lambda x: math.fabs(x[0]) * x[1], input)) / w)
            a_q = str(sum(map(lambda x: math.fabs(x[0]) * x[2], input)) / w)
        n = str(len(input))
        genes[gene] = [(r[F.SNP], r[F.GENE], r[F.GENE_NAME], r[F.REFERENCE_ALLELE],
                        r[F.EFFECT_ALLELE], r[F.WEIGHT], n, r[F.GENE_R2], a_p, a_q)
                       for k, r in rows.iteritems()]
    return genes
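# A worked micro-example of the |weight|-averaged p-value fix_rows() computes
# per gene (numbers illustrative only):
import math

weights = [0.5, 0.25]  # per-SNP weights for one gene
pvals = [0.01, 0.04]   # per-SNP gene p-values
w = sum(math.fabs(x) for x in weights)
avg_p = sum(math.fabs(wi) * pi for wi, pi in zip(weights, pvals)) / w
print(avg_p)  # (0.5*0.01 + 0.25*0.04) / 0.75 = 0.02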
def reorder(data, data_headers, array_order, comp_group_list, probeset_db,
            include_raw_data, array_type, norm, fl, logvalues=True,
            blanksPresent=False):
    ### array_order gives the final level order sorted, followed by the original index order as a tuple
    expbuilder_value_db = {}; group_name_db = {}
    summary_filtering_stats = {}; pval_summary_db = {}
    replicates = 'yes'
    stat_result_names = ['avg-', 'log_fold-', 'fold-', 'rawp-', 'adjp-']
    group_summary_result_names = ['avg-']

    ### Define expression variables
    try: probability_statistic = fl.ProbabilityStatistic()
    except Exception: probability_statistic = 'unpaired t-test'
    try: gene_exp_threshold = math.log(fl.GeneExpThreshold(), 2)
    except Exception: gene_exp_threshold = 0
    try: gene_rpkm_threshold = float(fl.RPKMThreshold())
    except Exception: gene_rpkm_threshold = 0
    try: FDR_statistic = fl.FDRStatistic()
    except Exception: FDR_statistic = 'Benjamini-Hochberg'
    calculateAsNonLog = True
    if blanksPresent: calculateAsNonLog = False

    ### Begin processing sample expression values according to the organized groups
    for row_id in data:
        try: gene = probeset_db[row_id][0]
        except TypeError: gene = ''  # not needed if not altsplice data
        data_headers2 = {}  # reset each time
        grouped_ordered_array_list = {}
        for x in array_order:
            y = x[1]  # this is the new first index
            group = x[2]
            group_name = x[3]
            group_name_db[group] = group_name
            # for example y = 5, therefore the data[row_id][5] entry is now the first
            try:
                try: new_item = data[row_id][y]
                except IndexError:
                    print row_id, data[row_id], len(data[row_id]), y, len(array_order), array_order
                    kill  # deliberate NameError to halt (original convention)
                if logvalues == False and calculateAsNonLog and array_type == 'RNASeq':
                    new_item = math.pow(2, new_item)
            except TypeError:
                new_item = ''  # this is for a spacer added in the above function
            try: grouped_ordered_array_list[group].append(new_item)
            except KeyError: grouped_ordered_array_list[group] = [new_item]
            try: data_headers2[group].append(data_headers[y])
            except KeyError: data_headers2[group] = [data_headers[y]]

        # perform statistics on each group comparison - comp_group_list: [(1,2),(3,4)]
        stat_results = {}
        group_summary_results = {}
        for comp in comp_group_list:
            group1 = int(comp[0]); group2 = int(comp[1])
            group1_name = group_name_db[group1]
            group2_name = group_name_db[group2]
            groups_name = group1_name + "_vs_" + group2_name
            data_list1 = grouped_ordered_array_list[group1]
            data_list2 = grouped_ordered_array_list[group2]  # baseline expression
            if blanksPresent:  ### Allows for empty cells
                data_list1 = filterBlanks(data_list1)
                data_list2 = filterBlanks(data_list2)
            try: avg1 = statistics.avg(data_list1)
            except Exception: avg1 = ''
            try: avg2 = statistics.avg(data_list2)
            except Exception: avg2 = ''
            try:
                if (logvalues == False and array_type != 'RNASeq') or (logvalues == False and calculateAsNonLog):
                    fold = avg1 / avg2
                    log_fold = math.log(fold, 2)
                    if fold < 1: fold = -1.0 / fold
                else:
                    log_fold = avg1 - avg2
                    fold = statistics.log_fold_conversion(log_fold)
            except Exception:
                log_fold = ''; fold = ''
            try:
                #t,df,tails = statistics.ttest(data_list1,data_list2,2,3) #unpaired student ttest, calls p_value function
                #t = abs(t); df = round(df); p = str(statistics.t_probability(t,df))
                p = statistics.runComparisonStatistic(data_list1, data_list2, probability_statistic)
            except Exception:
                p = 1; sg = 1; N1 = 0; N2 = 0
            comp = group1, group2
            if array_type == 'RNASeq':  ### Also non-log but treated differently
                if 'RPKM' == norm: adj = 0
                else: adj = 1
                if calculateAsNonLog == False:
                    try: avg1 = math.pow(2, avg1) - adj; avg2 = math.pow(2, avg2) - adj
                    except Exception: avg1 = ''; avg2 = ''
                if 'RPKM' == norm:
                    if avg1 < gene_rpkm_threshold and avg2 < gene_rpkm_threshold:
                        log_fold = 'Insufficient Expression'
                        fold = 'Insufficient Expression'
                else:
                    if avg1 < gene_exp_threshold and avg2 < gene_exp_threshold:
                        log_fold = 'Insufficient Expression'
                        fold = 'Insufficient Expression'
                #if row_id=='ENSG00000085514':
                #if fold=='Insufficient Expression':
                #print [norm, avg1, avg2, fold, comp, gene_exp_threshold, gene_rpkm_threshold, row_id]
                #5.96999111075 7.72930768675 Insufficient Expression (3, 1) 1.0 ENSG00000085514
            if gene_rpkm_threshold != 0 and calculateAsNonLog:  ### Any other data
                a1 = nonLogAvg(data_list1)
                a2 = nonLogAvg(data_list2)
                #print [a1,a2,gene_rpkm_threshold]
                if a1 < gene_rpkm_threshold and a2 < gene_rpkm_threshold:
                    log_fold = 'Insufficient Expression'
                    fold = 'Insufficient Expression'
                #print log_fold;kill
            try:
                gs = statistics.GroupStats(log_fold, fold, p)
                stat_results[comp] = groups_name, gs, group2_name
                if probability_statistic == 'moderated t-test':
                    gs.setAdditionalStats(data_list1, data_list2)  ### Assuming equal variance
                if probability_statistic == 'moderated Welch-test':
                    gs.setAdditionalWelchStats(data_list1, data_list2)  ### Assuming unequal variance
            except Exception:
                null = []; replicates = 'no'  ### Occurs when not enough replicates
                #print comp, len(stat_results); kill_program
            group_summary_results[group1] = group1_name, [avg1]
            group_summary_results[group2] = group2_name, [avg2]

        ### Replaces the below method to get the largest possible comparison fold and ftest p-value
        grouped_exp_data = []; avg_exp_data = []
        for group in grouped_ordered_array_list:
            data_list = grouped_ordered_array_list[group]
            if blanksPresent:  ### Allows for empty cells
                data_list = filterBlanks(data_list)
            if len(data_list) > 0:
                grouped_exp_data.append(data_list)
                try:
                    avg = statistics.avg(data_list)
                    avg_exp_data.append(avg)
                except Exception:
                    avg = ''
                    #print row_id, group, data_list;kill
        try:
            avg_exp_data.sort()
            max_fold = avg_exp_data[-1] - avg_exp_data[0]
        except Exception:
            max_fold = 'NA'
        try: ftestp = statistics.OneWayANOVA(grouped_exp_data)
        except Exception: ftestp = 1
        gs = statistics.GroupStats(max_fold, 0, ftestp)
        summary_filtering_stats[row_id] = gs

        stat_result_list = []
        for entry in stat_results:
            data_tuple = entry, stat_results[entry]
            stat_result_list.append(data_tuple)
        stat_result_list.sort()

        grouped_ordered_array_list2 = []
        for group in grouped_ordered_array_list:
            data_tuple = group, grouped_ordered_array_list[group]
            grouped_ordered_array_list2.append(data_tuple)
        grouped_ordered_array_list2.sort()  # now the list is sorted by group number

        ### for each rowid, add in the reordered data, and new statistics for each group and for each comparison
        for entry in grouped_ordered_array_list2:
            group_number = entry[0]
            original_data_values = entry[1]
            if include_raw_data == 'yes':  ### optionally exclude the raw values
                for value in original_data_values:
                    if array_type == 'RNASeq':
                        if norm == 'RPKM': adj = 0
                        else: adj = 1
                        if calculateAsNonLog == False:
                            value = math.pow(2, value) - adj
                    try: expbuilder_value_db[row_id].append(value)
                    except KeyError: expbuilder_value_db[row_id] = [value]
            if group_number in group_summary_results:
                group_summary_data = group_summary_results[group_number][1]  # the group name is listed as the first entry
                for value in group_summary_data:
                    try: expbuilder_value_db[row_id].append(value)
                    except KeyError: expbuilder_value_db[row_id] = [value]
            for info in stat_result_list:
                if info[0][0] == group_number:  # comp,(groups_name,[avg1,log_fold,fold,ttest])
                    comp = info[0]; gs = info[1][1]
                    expbuilder_value_db[row_id].append(gs.LogFold())
                    expbuilder_value_db[row_id].append(gs.Fold())
                    expbuilder_value_db[row_id].append(gs.Pval())
                    ### Create a placeholder and store the position of the adjusted p-value to be calculated
                    expbuilder_value_db[row_id].append('')
                    gs.SetAdjPIndex(len(expbuilder_value_db[row_id]) - 1)
                    gs.SetPvalIndex(len(expbuilder_value_db[row_id]) - 2)
                    pval_summary_db[(row_id, comp)] = gs

    ### do the same for the headers, but at the dataset level (redundant processes)
    array_fold_headers = []; data_headers3 = []
    try:
        for group in data_headers2:
            data_tuple = group, data_headers2[group]  # e.g. 1, ['X030910_25_hl.CEL', 'X030910_29R_hl.CEL', 'X030910_45_hl.CEL']
            data_headers3.append(data_tuple)
        data_headers3.sort()
    except UnboundLocalError:
        print data_headers, '\n', array_order, '\n', comp_group_list, '\n'
        kill_program  # deliberate NameError to halt (original convention)

    for entry in data_headers3:
        x = 0  # indicates the times through a loop
        y = 0  # indicates the times through a loop
        group_number = entry[0]
        original_data_values = entry[1]
        if include_raw_data == 'yes':  ### optionally exclude the raw values
            for value in original_data_values:
                array_fold_headers.append(value)
        if group_number in group_summary_results:
            group_name = group_summary_results[group_number][0]
            group_summary_data = group_summary_results[group_number][1]
            for value in group_summary_data:
                combined_name = group_summary_result_names[x] + group_name  # group_summary_result_names = ['avg-']
                array_fold_headers.append(combined_name)
                x += 1  # increment the loop index
        for info in stat_result_list:
            if info[0][0] == group_number:  # comp,(groups_name,[avg1,log_fold,fold,ttest],group2_name)
                groups_name = info[1][0]
                only_add_these = stat_result_names[1:]
                for value in only_add_these:
                    new_name = value + groups_name
                    array_fold_headers.append(new_name)

    ### For the raw_data only export we need the headers for the different groups (data_headers2) and group names (group_name_db)
    raw_data_comp_headers = {}
    for comp in comp_group_list:
        temp_raw = []
        group1 = int(comp[0]); group2 = int(comp[1])
        comp = str(comp[0]), str(comp[1])
        g1_headers = data_headers2[group1]
        g2_headers = data_headers2[group2]
        g1_name = group_name_db[group1]
        g2_name = group_name_db[group2]
        for header in g2_headers:
            temp_raw.append(g2_name + ':' + header)
        for header in g1_headers:
            temp_raw.append(g1_name + ':' + header)
        raw_data_comp_headers[comp] = temp_raw

    ### Calculate adjusted ftest p-values using BH95 sorted method
    statistics.adjustPermuteStats(summary_filtering_stats)

    ### Calculate adjusted p-values for all p-values using BH95 sorted method
    round = 0
    for info in comp_group_list:
        compid = int(info[0]), int(info[1]); pval_db = {}
        for (rowid, comp) in pval_summary_db:
            if comp == compid:
                gs = pval_summary_db[(rowid, comp)]
                pval_db[rowid] = gs
        if 'moderated' in probability_statistic and replicates == 'yes':
            ### Moderates the original reported test p-value prior to adjusting
            try: statistics.moderateTestStats(pval_db, probability_statistic)
            except Exception:
                if round == 0:
                    if replicates == 'yes':
                        print 'Moderated test failed due to issue with mpmath or out-of-range values\n   ... using unmoderated unpaired test instead!'
                null = []  ### Occurs when not enough replicates
            round += 1
        if FDR_statistic == 'Benjamini-Hochberg':
            statistics.adjustPermuteStats(pval_db)
        else:
            ### Calculate a q-value (https://github.com/nfusi/qvalue)
            import numpy; import qvalue
            pvals = []; keys = []
            for key in pval_db:
                pvals.append(pval_db[key].Pval()); keys.append(key)
            pvals = numpy.array(pvals)
            pvals = qvalue.estimate(pvals)
            for i in range(len(pvals)):
                pval_db[keys[i]].SetAdjP(pvals[i])
        for rowid in pval_db:
            gs = pval_db[rowid]
            expbuilder_value_db[rowid][gs.AdjIndex()] = gs.AdjP()  ### set the placeholder to the calculated value
            if 'moderated' in probability_statistic:
                expbuilder_value_db[rowid][gs.RawIndex()] = gs.Pval()  ### Replace the non-moderated with a moderated p-value
    pval_summary_db = []

    ### Finished re-ordering lists and adding statistics to expbuilder_value_db
    return expbuilder_value_db, array_fold_headers, summary_filtering_stats, raw_data_comp_headers
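# reorder() dispatches to statistics.adjustPermuteStats for the
# 'Benjamini-Hochberg' branch (the 'BH95 sorted method' in the comments).
# For readers without that module, a standalone numpy sketch of the standard
# BH step-up adjustment it refers to:
import numpy

def bh_adjust(pvals):
    # Sort, scale each p-value by n/rank, then enforce monotonicity from the
    # largest p-value downwards so adjusted values never decrease with rank.
    p = numpy.asarray(pvals, dtype=float)
    n = len(p)
    order = numpy.argsort(p)
    scaled = p[order] * n / numpy.arange(1, n + 1)
    adj = numpy.minimum.accumulate(scaled[::-1])[::-1]
    out = numpy.empty(n)
    out[order] = numpy.clip(adj, 0.0, 1.0)
    return out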
def performTest(self, ontology_type, genes_of_interest, cutoff):
    if cutoff is None:
        cutoff = p_val_threshold

    total_num_genes = self._mapper.get_gene_count(ontology_type)
    term2genes = self._mapper.get_term_mapping(ontology_type)
    gene2terms = self._mapper.get_gene_mapping(ontology_type)

    filtered = self.__filter_input(genes_of_interest, gene2terms)
    #print(filtered)
    #print(len(filtered))

    sample_map = self.__calculateTermFrequency(filtered, gene2terms)
    n = len(filtered)

    # Number of tests performed: will be used for correction.
    num_tests = len(sample_map)
    results = [{}] * num_tests  # every slot is reassigned in the loop below
    pvals = np.zeros(num_tests)

    idx = 0
    for term in sample_map:
        # Calculate p-value
        sampled = sample_map[term]
        assigned_genes = term2genes[term]
        k = len(sampled)
        m = len(assigned_genes)
        p = stats.hypergeom.pmf(k, total_num_genes, m, n)
        p_corrected = p * num_tests  # Bonferroni-corrected
        pvals[idx] = p

        result = {}
        result['id'] = term
        result['p-value'] = p_corrected
        result['background'] = m
        result['genes'] = list(sampled)
        results[idx] = result
        idx = idx + 1
        #print(term + " = " + str(p) + ", k = " + str(k) + ", m = " + str(m)
        #      + ", total = " + str(total_num_genes) + ", n= " + str(n))

    # Correct border values (library does not accept 0 & 1)
    i = 0
    for p in pvals:
        if p >= 1.0 or math.isnan(p):
            pvals[i] = 1.0 - 1e-16  # the original literal had too many 9s and rounded to exactly 1.0 as a float
        elif p <= 0:
            pvals[i] = 1e-22
        i += 1

    qvals = qvalue.estimate(pvals)

    filtered_results = []
    idx = 0
    for term in sample_map:
        qv = qvals[idx]
        res = results[idx]
        pv = res['p-value']
        k = len(res['genes'])
        res['q-value'] = qv
        if pv < cutoff and k >= gene_threshold:
            filtered_results.append(res)
        idx = idx + 1

    return {'results': filtered_results, 'total_genes': total_num_genes}
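# The border-value loop in performTest() can be vectorized; a sketch under
# the same constraint (the library rejects p-values of exactly 0 or 1), with
# illustrative epsilons:
import numpy

def clamp_pvals(pvals, lo=1e-22, hi=1.0 - 1e-16):
    # Treat NaN as non-significant (~1) and clamp into the open interval (0, 1).
    p = numpy.asarray(pvals, dtype=float)
    p = numpy.where(numpy.isnan(p), hi, p)
    return numpy.clip(p, lo, hi)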
print('postprocessed data size', testddirect.shape)
t = time.time()
r = RunBootstrapPercentile(testddirect, NumPerm=100, gN=G, useTF=False,
                           n_cores=1, fSavefile=False)
print('Completed in %g seconds' % (time.time() - t))

####################### Hypothesis test ############################################
import qvalue

alpha = 1e-6  # significance level for hypothesis test. Can vary this here.
dj = np.diag_indices(G, 2)  # index to the diagonal of a G x G matrix

# q-value calculation should be similar to Bonferroni
qvalues, pi0 = qvalue.estimate(r['pvalues'], verbose=True)
adjMatrixBootstrapQ = np.zeros((G, G), dtype=bool)
adjMatrixBootstrapQ[qvalues < alpha] = True
adjMatrixBootstrapQ[dj] = False  # remove self-transitions

adjMatrixTrue = np.zeros((G, G), dtype=bool)
for i in range(G):
    for j in range(G):
        adjMatrixTrue[i, j] = (angularSpeed[i] == angularSpeed[j])
adjMatrixTrue[dj] = False  # have to do it, because white noise diagonal not true

trueDF = pd.DataFrame(adjMatrixTrue, columns=data.index, index=data.index)
trueDF.to_csv('trueClusteringAdjMatrix.csv')
edgeNetworkTrue = CreateEdgeNetwork(adjMatrixBootstrap=adjMatrixTrue,
                                    cost=np.ones((G, G)),
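# Per the call above, verbose=True makes qvalue.estimate also return pi0,
# the estimated proportion of true null hypotheses. A sanity check one might
# add (purely illustrative):
def check_pi0(pi0):
    # pi0 is a proportion; values outside (0, 1] suggest a poorly behaved
    # p-value distribution or too few tests.
    if not 0.0 < pi0 <= 1.0:
        raise ValueError('suspicious pi0 estimate: %r' % (pi0,))
    return pi0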
import numpy as np
import pandas as pd
import qvalue

final_matrix = pd.DataFrame()
pathway_list = np.unique(asd["pathway"])
for i in pathway_list[0:1]:  # note: only the first pathway, as in the original slice
    pathway_list_each = list(asd.gene[asd.pathway == i])
    expression_overlaid_pathway = pathway_overlaid(expression_data_raw, pathway_list_each)
    expression_overlaid_pathway.columns = ["gene"] + ["case"] * 5 + ["control"] * 10
    pathway_matrix = activity_score_calculate(expression_overlaid_pathway, "case", "control")
    pathway_matrix_T = pathway_matrix.T
    pathway_matrix_T["pathway"] = i
    final_matrix = final_matrix.append(pathway_matrix_T)

print(final_matrix)
print(final_matrix.loc["p_value"])
final_matrix["q-value"] = qvalue.estimate(final_matrix["p_value"])
print(final_matrix)
final_matrix.to_csv("./output data/0926_final_matrix_" + ex_list.split(".")[0] + ".csv",
                    index=False)