# NOTE: standard-library/third-party imports used in this section.  The
# project-specific modules (v5/V5, co), the decon_temp path, and helpers such
# as get_proportion, get_query_expression, is_type_available,
# get_reference_expression, and get_signatures are assumed to be
# imported/defined elsewhere in the file.
import os
import random

import numpy as np
import pandas as pd


def get_exp_proportion(exp):
    study = v5.exp_to_study(exp)
    # Load the study-level nonzero weight table produced by the deconvolution step.
    nonzero = pd.read_csv(decon_temp + 'nonzero_Weight_' + study + '.tsv',
                          sep='\t', index_col=0)
    # Flag studies that contain only a single experiment.
    exps = v5.study_to_exps(study)
    singleExp = len(exps) == 1
    return get_proportion(exp, nonzero, singleExp)
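
# Hedged sketch (not part of the original code): a small convenience wrapper
# that collects the deconvolution proportion for every experiment in a study.
# It relies only on v5.study_to_exps and get_exp_proportion as they are used
# above; the study accession is whatever identifier v5 expects.
def get_study_proportions(study):
    proportions = {}
    for exp in v5.study_to_exps(study):
        # Each call re-reads the study-level nonzero-weight table inside
        # get_exp_proportion, which is fine for a handful of experiments.
        proportions[exp] = get_exp_proportion(exp)
    return proportions
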
def scatterplot(ax, exp, celltype, remove_study=False):
    study = v5.exp_to_study(exp)
    if remove_study:
        if not is_type_available(celltype, study):
            print("Cell type ({}) not provided in reference ".format(celltype) +
                  "matrix for this study.")
            return
    query = get_query_expression(exp)
    exp_list = v5.study_to_exps(study)
    reference = get_reference_expression(celltype, exp_list, remove_study)
    # reference[0] holds the reference expression profile, reference[1] the
    # number of experiments that contributed to it.
    ax.scatter(reference[0], query)
    ax.set(xlim=(0, 450000), ylim=(0, 450000))
    # Draw the y = x diagonal as a visual guide.
    diag_line, = ax.plot(ax.get_xlim(), ax.get_ylim(), ls="--", c=".3")
    ticks = np.arange(0, 450000, 100000)
    ax.set_xticks(ticks)
    ax.set_yticks(ticks)
    ax.set(xlabel="{} ({} experiments)".format(co.get_term_name(celltype), reference[1]),
           ylabel=exp)
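
# Hedged usage sketch (not part of the original code): how scatterplot() might
# be driven from a matplotlib figure.  Only the matplotlib calls are standard
# API; exp and celltype are whatever identifiers the helpers above expect.
def plot_query_vs_reference(exp, celltype, remove_study=False):
    import matplotlib.pyplot as plt

    fig, ax = plt.subplots(figsize=(6, 6))
    scatterplot(ax, exp, celltype, remove_study=remove_study)
    fig.tight_layout()
    return fig
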
def multi_core(cell_exp_count, studies, exp_acc, gene_ids, countspermillion,
               qualified_cell_type_name, cell_type_file, qualified_cell_type):
    exp_acc_list = list(exp_acc)
    cell_types_selected = qualified_cell_type

    # Pick one random experiment per cell type; these form the noise-added
    # (single-sample) reference, and their studies are recorded so they can be
    # excluded from the noise-free reference below.
    reference_matrix = []
    select_study_list = {}
    for i in qualified_cell_type:
        tmp_exp = V5.celltype_to_exp(i)
        select_sample = random.choice(tmp_exp)
        select_study_list[i] = V5.exp_to_study(select_sample)
        exp_index = exp_acc_list.index(select_sample)
        reference_matrix.append(exp_index)

    # Build the noise-added reference matrix (one row per cell type).
    for i in range(len(reference_matrix)):
        if i == 0:
            reference = countspermillion[reference_matrix[i]]
        else:
            tmp = countspermillion[reference_matrix[i]]
            reference = np.vstack((reference, tmp))
    reference_noise = reference

    # Build the noise-free reference matrix: for each cell type, average all
    # experiments that do NOT come from the study used for the noisy reference.
    reference_noise_free = []
    for i in range(len(qualified_cell_type)):
        tmp_exp = V5.celltype_to_exp(qualified_cell_type[i])
        tmp_ref = []
        # Every cell type is included, so we can simply reuse the experiment
        # list retrieved above.  Only one study was chosen for the noisy
        # reference, so exclude that study here.
        for j in tmp_exp:
            if V5.exp_to_study(j) != select_study_list[qualified_cell_type[i]]:
                tmp_ref.append(exp_acc_list.index(j))
        # Stack the remaining experiments and average them (assumes every cell
        # type has at least one experiment outside the selected study).
        for j in range(len(tmp_ref)):
            if j == 0:
                reference = countspermillion[tmp_ref[j]]
            else:
                tmp = countspermillion[tmp_ref[j]]
                reference = np.vstack((reference, tmp))
        if len(tmp_ref) > 1:
            ref_mean = np.mean(reference, axis=0)
        else:
            ref_mean = reference
        reference_noise_free.append(ref_mean)

    reference_noise_free_np = np.array(reference_noise_free)
    signature_np = np.transpose(reference_noise_free_np)
    reference_noise_np = reference_noise.copy()
    signature_noise_np = np.transpose(reference_noise_np)

    # Transform to pandas (transpose back so rows are cell types, columns genes).
    signature_np = signature_np.transpose()
    signature_noise_np = signature_noise_np.transpose()
    signature_pd = pd.DataFrame(data=signature_np, columns=gene_ids,
                                index=qualified_cell_type_name)
    signature_noise_np_pd = pd.DataFrame(
        data=signature_noise_np, columns=gene_ids,
        index=[co.get_term_name(i) for i in qualified_cell_type])

    # Save the signature and noisy signature for future analysis.
    signature_pd.to_csv('~/IndependentStudy/Data/SignatureSimulation/' +
                        str(cell_exp_count) + '_signature.tsv', sep='\t')
    signature_noise_np_pd.to_csv('~/IndependentStudy/Data/SignatureSimulation/' +
                                 str(cell_exp_count) + '_signature_noise.tsv', sep='\t')

    # Build the variance data set.
    # Eliminate redundant (child) cell type terms in each experiment's annotation.
    cell_type_specific_file = {}
    for i in cell_type_file:
        cell_type_specific_file[i] = co.get_terms_without_children(cell_type_file[i])

    # Build the experiment <-> study lookup dictionaries.
    studyexpMap = {}
    expstudyMap = {}
    for i in range(len(exp_acc)):
        expstudyMap[exp_acc[i]] = studies[i]
        if studies[i] not in studyexpMap:
            studyexpMap[studies[i]] = [exp_acc[i]]
        else:
            studyexpMap[studies[i]].append(exp_acc[i])

    # Build the variance matrix (one row of per-gene variances per cell type).
    variance_matrix = []
    cell_types_48 = []
    for cell_co in range(len(cell_types_selected)):
        # Collect the experiments annotated with this cell type.
        cellExpDict = {}
        for i in cell_type_specific_file:
            if cell_types_selected[cell_co] in cell_type_specific_file[i]:
                cellExpDict[i] = [cell_types_selected[cell_co]]

        # Keep one representative experiment per study for this cell type.
        expPerStudy = []
        keys = list(cellExpDict.keys())
        studyList = []
        for i in keys:
            if expstudyMap[i] not in studyList:
                studyList.append(expstudyMap[i])
                expPerStudy.append(i)

        # Group this cell type's experiments by study.
        tmp_exp_study = {}
        for i in cellExpDict.keys():
            if expstudyMap[i] not in tmp_exp_study:
                tmp_exp_study[expstudyMap[i]] = [i]
            else:
                tmp_exp_study[expstudyMap[i]].append(i)

        # Compute the per-study mean profiles and the within-study variance,
        # skipping the study that was used to build the noisy reference.
        tmp_mean = []
        within_study_var = []
        for study_id, study_exps in tmp_exp_study.items():
            if study_id != select_study_list[cell_types_selected[cell_co]]:
                # Grab the indices of this study's experiments.
                specific_cell_exp_index = []
                for i in range(len(exp_acc)):
                    if exp_acc[i] in study_exps:
                        specific_cell_exp_index.append(i)
                specific_cell_exp_signature = get_signatures(
                    specific_cell_exp_index, countspermillion)
                # tmp_mean collects the per-study mean expression profile.
                if len(study_exps) == 1:
                    tmp_mean.append(specific_cell_exp_signature)
                else:
                    tmp_mean.append(np.mean(specific_cell_exp_signature, axis=1))
                # Within-study variance of the residues (only meaningful when
                # the study has more than one experiment).
                if len(study_exps) > 1:
                    tmp_residue_list = []
                    for index in specific_cell_exp_index:
                        tmp_exp = get_signatures([index], countspermillion)
                        tmp_residue = np.abs(
                            tmp_exp - np.mean(specific_cell_exp_signature, axis=1))
                        tmp_residue_list.append(tmp_residue)
                    tmp_residue_list = np.array(tmp_residue_list)
                    within_study_var.append(np.var(tmp_residue_list, axis=0))
                else:
                    within_study_var.append(
                        np.zeros(specific_cell_exp_signature.shape[0]))

        cell_types_48 += tmp_mean
        within_study_var = np.array(within_study_var)

        # Between-study variance of the per-study means.
        tmp_mean = np.array(tmp_mean)
        study_variance = np.var(tmp_mean, axis=0)

        # Assume the variance sum law: total = between-study + within-study.
        total_variance = np.zeros(study_variance.shape[0])
        total_variance = total_variance + study_variance
        for i in within_study_var:
            total_variance = total_variance + i
        variance_matrix.append(total_variance)

    variance_matrix = np.array(variance_matrix)
    print(variance_matrix.shape)
    os.system("touch " + '~/IndependentStudy/Data/Variance/' +
              str(cell_exp_count) + '_variance.txt')
    np.savetxt('/ua/shi235/IndependentStudy/Data/Variance/' +
               str(cell_exp_count) + '_variance.txt',
               variance_matrix, delimiter="\t")
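
# Hedged sketch (not from the original script): multi_core() takes the
# replicate counter as its first argument and writes its own output files, so
# a driver like the one below could fan replicates out with the standard
# library's multiprocessing.Pool.  The replicate count and the shared inputs
# are placeholders supplied by whatever loads the data in the parent process.
def run_simulations(n_replicates, studies, exp_acc, gene_ids, countspermillion,
                    qualified_cell_type_name, cell_type_file, qualified_cell_type):
    import functools
    import multiprocessing

    worker = functools.partial(
        multi_core,
        studies=studies,
        exp_acc=exp_acc,
        gene_ids=gene_ids,
        countspermillion=countspermillion,
        qualified_cell_type_name=qualified_cell_type_name,
        cell_type_file=cell_type_file,
        qualified_cell_type=qualified_cell_type)
    with multiprocessing.Pool() as pool:
        # Each worker writes its own *_signature.tsv / *_variance.txt files,
        # so no results need to be returned here.
        pool.map(worker, range(n_replicates))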