def get_RSA_frequencies(natural_proteins, lower_RSA_boundary, upper_RSA_boundary):
    """Mean amino-acid frequencies, relative to bin 1, over the alignment
    sites whose RSA falls inside [lower_RSA_boundary, upper_RSA_boundary].

    Returns a list of five ratios (each bin mean divided by the bin-1 mean),
    or five zeros when no site lies in the RSA window.  Diagnostics (the
    ratios and the number of contributing sites) are printed as a side effect.
    """
    site_distribution = af.get_AA_distribution(natural_proteins)
    rsa_array = af.make_array(af.get_RSA_Values(natural_proteins))

    # One list per frequency bin; each in-window site contributes its first
    # five distribution entries.
    aa_bins = [[] for _ in range(5)]
    hits = 0
    for position, site in enumerate(site_distribution):
        if lower_RSA_boundary <= rsa_array[position] <= upper_RSA_boundary:
            for aa_index in range(5):
                aa_bins[aa_index].append(site[aa_index])
            hits += 1

    if hits == 0:
        frequency_data = [0.0, 0.0, 0.0, 0.0, 0.0]
    else:
        frequency_data = [mean(column) / mean(aa_bins[0]) for column in aa_bins]
        if mean(aa_bins[0]) == 0.0:
            print("MEAN OF BIN 1 is ZERO!!!!")
    print(frequency_data)
    print("Number of residues in bin: " + str(hits))
    return frequency_data
def get_position_count_data(file):
    """Split alignment sites into buried (RSA < 0.05) and surface (RSA >= 0.05)
    groups and return each group's per-column counts as fractions of the
    group's grand total.

    Parameters:
        file: path to an alignment data-array file; its lines are handed to
            af.get_transformed_data and the path itself to af.get_RSA_Values.

    Returns:
        (buried_counts, surface_counts): two lists of floats, one entry per
        column of the corresponding site matrix.

    NOTE(review): assumes each group is non-empty; an all-buried or all-surface
    protein makes the `.shape` unpack raise ValueError — confirm callers
    guarantee both classes occur.  A later duplicate definition of this
    function in the file uses `<= 0.05` instead of `< 0.05`.
    """
    # Context manager guarantees the handle is closed even on a read error;
    # the original also shadowed the builtin `input` with the handle name.
    with open(file, "r") as handle:
        protein_data = handle.readlines()
    RSA = af.get_RSA_Values(file)
    site_data = af.get_transformed_data(protein_data)

    # Partition sites by the buried-RSA cutoff.
    buried_sites = []
    surface_sites = []
    for i, site in enumerate(site_data):
        if float(RSA[i]) < 0.05:
            buried_sites.append(site)
        else:
            surface_sites.append(site)

    buried_array = np.array(buried_sites)
    surface_array = np.array(surface_sites)
    buried_m, buried_n = buried_array.shape
    surface_m, surface_n = surface_array.shape
    buried_total = float(buried_array.sum())
    surface_total = float(surface_array.sum())

    # Column sums normalized by the group total (replaces the manual
    # while-loop counters of the original).
    buried_counts = [float(buried_array[:, j].sum()) / buried_total
                     for j in range(buried_n)]
    surface_counts = [float(surface_array[:, j].sum()) / surface_total
                      for j in range(surface_n)]
    return buried_counts, surface_counts
def get_mixed_entropy_values(PDB, buried_temp, surface_temp):
    """Blend per-site entropies from two designed alignments of one PDB:
    buried sites (RSA <= 0.25) take the entropy from the buried_temp design,
    all other sites take it from the surface_temp design.

    Returns (RSA, mixed_entropies); RSA comes from the buried_temp file.
    """
    buried_file = "align_data_array_" + PDB + "_" + str(buried_temp) + ".dat"
    surface_file = "align_data_array_" + PDB + "_" + str(surface_temp) + ".dat"

    RSA = af.make_array(af.get_RSA_Values(buried_file))
    from_buried = af.get_native_entropy(buried_file)
    from_surface = af.get_native_entropy(surface_file)

    # Choose the entropy source site-by-site based on the RSA cutoff.
    mixed = [
        from_buried[i] if float(RSA[i]) <= 0.25 else from_surface[i]
        for i in range(len(RSA))
    ]
    return RSA, mixed
def get_position_count_data(file):
    """Return normalized per-column count fractions for buried (RSA <= 0.05)
    and surface sites of an alignment, as (buried_counts, surface_counts).

    NOTE(review): this is a re-definition of get_position_count_data; at
    import time it replaces the earlier version, which used a strict < 0.05
    cutoff instead of <= 0.05 — confirm which cutoff is intended.
    """
    data_file = open(file, "r")
    protein_data = data_file.readlines()
    data_file.close()
    RSA = af.get_RSA_Values(file)
    site_data = af.get_transformed_data(protein_data)

    # Partition the site rows by the buried-RSA cutoff.
    buried_sites = []
    surface_sites = []
    for position, site in enumerate(site_data):
        if float(RSA[position]) <= 0.05:
            buried_sites.append(site)
        else:
            surface_sites.append(site)

    # Same normalization for both groups: each column sum divided by the
    # group's grand total.
    grouped_counts = []
    for group in (np.array(buried_sites), np.array(surface_sites)):
        n_rows, n_cols = group.shape
        group_total = sum(sum(group))
        fractions = []
        for col in range(n_cols):
            fractions.append(float(sum(group[:, col])) / float(group_total))
        grouped_counts.append(fractions)

    buried_counts, surface_counts = grouped_counts
    return buried_counts, surface_counts
def get_RSA_frequencies(natural_proteins, lower_RSA_boundary, upper_RSA_boundary):
    """Log relative amino-acid frequencies for sites whose RSA lies in
    [lower_RSA_boundary, upper_RSA_boundary], with the matching k indices
    reported via set_k_r.

    Frequencies are each bin's mean divided by the bin-0 mean; exact zeros
    are excluded from both the returned log values and the k-index list.

    Bug fix: the original zero-skip branch used `continue` without advancing
    the running k counter, so every frequency after a zero entry was paired
    with the wrong k value in k_values.  enumerate() pairs each frequency
    with its true index.

    NOTE(review): this re-definition overrides the earlier 5-bin
    get_RSA_frequencies in the same file — confirm that is intended.
    """
    natural_distribution = get_AA_distribution_mod(natural_proteins)
    natural_RSA_array = af.make_array(af.get_RSA_Values(natural_proteins))

    frequency_data = []
    k_values = []

    # One bin per amino acid (20); each in-window site contributes its full
    # 20-entry distribution row.
    bins = [[] for _ in range(20)]
    count = 0
    for i, site in enumerate(natural_distribution):
        if lower_RSA_boundary <= natural_RSA_array[i] <= upper_RSA_boundary:
            for aa_index in range(20):
                bins[aa_index].append(site[aa_index])
            count += 1

    if count != 0:
        reference = np.mean(bins[0])
        frequencies = [np.mean(b) / reference for b in bins]
        for k, element in enumerate(frequencies):
            # Zero frequencies are excluded (log(0) is undefined); k now
            # always matches the amino-acid index of the kept frequency.
            if element != 0.0:
                frequency_data.append(np.log(element))
                k_values.append(k)

    set_k_r(k_values, lower_RSA_boundary, upper_RSA_boundary)
    return frequency_data
# Script section: for each sampling temperature, compute the buried-residue
# fraction of every PDB's designed alignment and draw them as a bar chart.
count = 0
for temp in temps:
    buried_percentage_list = []
    for pdb_id in PDBS:
        # Designed-alignment file for this PDB/chain at this temperature.
        file = "align_data_array_" + pdb_id + "_" + chain_id + "_" + str(temp) + ".dat"
        #fileparts = re.split("_",file)
        #pdb_id = fileparts[3].upper() #Gets the PDB Name and the chain_id
        #chain_id = fileparts[6]
        #chain_id = chain_id[0]
        #print "Processing file: " + file
        #pdb_names.append(pdb_id)
        #chain_names.append(chain_id)
        proteins = file
        RSA = af.get_RSA_Values(proteins)
        #natural_RSA_array = af.make_array(natural_RSA)
        # presumably the fraction of sites below a buried-RSA cutoff — TODO
        # confirm against get_percent_buried's definition.
        buried_percentage = get_percent_buried(RSA)
        buried_percentage_list.append(buried_percentage)
        #print temp, RSA
    #print str(temp), buried_percentage_list, "/n"
    N = 40 #The number of bars in each group - should be 20 for the 20 amino acids
    index = np.arange(N) # The x locations for the groups
    # NOTE(review): count is never incremented within this span, so every
    # temperature would reuse figure 0 — confirm it is advanced elsewhere.
    fig = plt.figure(count)
    ax = plt.axes([0.07, 0.14, 0.90, 0.83])
    width = 0.40 #The width of the bars
    b1 = plt.bar(index, buried_percentage_list, width, color = "blue")
    # Temperature label placed inside the axes.
    temp_string = "Temp = " + str(temp)
    ax.text(37.5, 0.325, temp_string, fontweight = 'bold', ha = 'center', fontsize = 14)#, va = 'center', fontsize = 16)
    #ax.text(0.6, 0.265,"Temp = " + str(temp), fontweight = 'bold', fontsize = 18)#, ha = 'center', va = 'center', fontsize = 16)
# Script section: per-PDB Pearson correlations between RSA and (a) two
# mixed-entropy profiles (buried/surface temperature pairs 0.0/0.1 and
# 0.03/0.1) and (b) the natural-alignment entropy.  Results accumulate in
# cor_values1, cor_values2 and natural_cor_values (defined outside this span).
for PDB in PDBS:
    RSA1, entropy_mix1 = get_mixed_entropy_values(PDB, 0.0, 0.1)
    RSA2, entropy_mix2 = get_mixed_entropy_values(PDB, 0.03, 0.1)
    [cor_entropy_RSA_mix1, pvalue1] = pearsonr(RSA1, entropy_mix1)
    # float() converts the correlation to a plain Python float before storing.
    cor_entropy_RSA_mix1 = float(cor_entropy_RSA_mix1)
    cor_values1.append(cor_entropy_RSA_mix1)
    [cor_entropy_RSA_mix2, pvalue2] = pearsonr(RSA2, entropy_mix2)
    cor_entropy_RSA_mix2 = float(cor_entropy_RSA_mix2)
    cor_values2.append(cor_entropy_RSA_mix2)
    # Natural-sequence alignment for the same PDB.
    natural_file = "align_natural_data_array_" + PDB + ".dat"
    natural_RSA = af.make_array(af.get_RSA_Values(natural_file))
    natural_entropy = af.get_native_entropy(natural_file)
    [natural_cor_entropy_RSA, pvalue3] = pearsonr(natural_RSA, natural_entropy)
    natural_cor_entropy_RSA = float(natural_cor_entropy_RSA)
    natural_cor_values.append(natural_cor_entropy_RSA)
# Figure setup: rows are PDBs, columns are (mix1, mix2, natural) after the
# transpose.
fig = plt.figure(1, dpi = 400, figsize = (16,6))
correlation_values = [cor_values1, cor_values2, natural_cor_values]
correlation_values_transpose = transpose(correlation_values)
(m,n) = correlation_values_transpose.shape
#rcParams['lines.linewidth'] = 2
ax = axes([0.066, 0.115, 0.43, 0.85])
#text(-0.37, 0.6, "A", fontweight = 'bold', ha = 'center', va = 'center', fontsize = 20)
# NOTE(review): the triple quote below opens a string/comment block that
# continues past this span; everything after it is inert until the matching
# quote.
'''
# Script section: same per-temperature buried-fraction bar chart as the
# variant above, but reading the "soft" design alignments.
count = 0
for temp in temps:
    buried_percentage_list = []
    for pdb_id in PDBS:
        # NOTE(review): the loop variable temp is not part of this filename —
        # every temperature iteration reads the same soft.dat file and only
        # the plot label changes; confirm this is intended.
        file = "align_data_array_" + pdb_id + "_" + chain_id + "_" + "soft.dat"
        #fileparts = re.split("_",file)
        #pdb_id = fileparts[3].upper() #Gets the PDB Name and the chain_id
        #chain_id = fileparts[6]
        #chain_id = chain_id[0]
        print "Processing file: " + file
        #pdb_names.append(pdb_id)
        #chain_names.append(chain_id)
        natural_proteins = file
        natural_RSA = af.get_RSA_Values(natural_proteins)
        #natural_RSA_array = af.make_array(natural_RSA)
        # presumably the fraction of sites below a buried-RSA cutoff — TODO
        # confirm against get_percent_buried's definition.
        buried_percentage = get_percent_buried(natural_RSA)
        buried_percentage_list.append(buried_percentage)
        print buried_percentage_list
    N = 40 #The number of bars in each group - should be 20 for the 20 amino acids
    index = np.arange(N) # The x locations for the groups
    # NOTE(review): count is never incremented within this span, so every
    # temperature would reuse figure 0 — confirm it is advanced elsewhere.
    fig = plt.figure(count)
    ax = plt.axes([0.07, 0.14, 0.90, 0.83])
    width = 0.40 #The width of the bars
    b1 = plt.bar(index, buried_percentage_list, width, color = "blue")
    temp_string = "Temp = " + str(temp)
    ax.text(37.5, 0.325, temp_string, fontweight = 'bold', ha = 'center', fontsize = 14)#, va = 'center', fontsize = 16)
    #ax.text(0.6, 0.265,"Temp = " + str(temp), fontweight = 'bold', fontsize = 18)#, ha = 'center', va = 'center', fontsize = 16)
    ax.set_xticklabels(PDBS, rotation = 'vertical')
# Per-PDB analysis inputs: file names for the Rosetta- and evolution-designed
# alignments plus two natural-sequence subsamples.  pdb_id, chain_id and
# natural_proteins come from an enclosing scope outside this span.
designed_proteins_rosetta = "align_data_array_" + pdb_id + "_" + chain_id + "_" + str("rosetta") + ".dat"
designed_proteins_evolved = "align_data_array_" + pdb_id + "_" + chain_id + "_" + str("evolved") + ".dat"
split_natural_1 = "align_natural_sample1_data_array_" + pdb_id + "_" + chain_id + ".dat"
split_natural_2 = "align_natural_sample2_data_array_" + pdb_id + "_" + chain_id + ".dat"
#Calculates all of the data for comparison (ex. entropy)
natural_distribution = analysis_functions.get_AA_distribution_KL(natural_proteins)
natural_entropy = analysis_functions.get_native_entropy(natural_proteins)
natural_entropy_array = analysis_functions.make_array(natural_entropy)
natural_RSA = analysis_functions.get_RSA_Values(natural_proteins)
natural_RSA_array = analysis_functions.make_array(natural_RSA)
# Per-PDB means accumulate in lists defined outside this span.
natural_mean_RSA_values.append(mean(natural_RSA_array))
natural_mean_entropy_values.append(mean(natural_entropy_array))
#Calculates cn & wcn
# cn13_data = analysis_functions.get_cn13_values(pdb_id, chain_id)
# iCN13 = cn13_data[0]
# iCN13_array = analysis_functions.make_array(cn13_data)
# mean_iCN13_values.append(mean(iCN13_array))
# iWCN presumably stands for inverse weighted contact number — confirm
# against calc_wcn.get_iwcn_values.
iwcn_data = calc_wcn.get_iwcn_values(pdb_id, chain_id)
iWCN_array = analysis_functions.make_array(iwcn_data)
mean_iWCN_values.append(mean(iWCN_array))
# NOTE(review): the statement below is truncated at this chunk's boundary;
# its arguments continue outside this span.
designed_distribution_rosetta = analysis_functions.get_AA_distribution_KL(
def get_RSA_frequencies(natural_proteins, lower_RSA_boundary, upper_RSA_boundary):
    """Per-amino-acid log relative frequencies for alignment sites whose RSA
    lies in [lower_RSA_boundary, upper_RSA_boundary].

    Each of the 20 amino-acid bins is averaged over the in-window sites and
    normalized by the bin-0 mean; an exactly-zero ratio is recorded as 0.0
    rather than log(0).  Returns the sentinel [-1] when no site falls inside
    the RSA window.

    NOTE(review): this re-definition overrides the earlier versions of
    get_RSA_frequencies in this file — confirm that is intended.
    """
    natural_distribution = af.get_AA_distribution(natural_proteins)
    rsa_array = af.make_array(af.get_RSA_Values(natural_proteins))

    # One list per amino acid; each in-window site contributes its full
    # 20-entry distribution row.
    aa_bins = [[] for _ in range(20)]
    hits = 0
    for position, site in enumerate(natural_distribution):
        if lower_RSA_boundary <= rsa_array[position] <= upper_RSA_boundary:
            for aa_index in range(20):
                aa_bins[aa_index].append(site[aa_index])
            hits += 1

    if hits == 0:
        # Sentinel for "no data in this RSA window".
        return [-1]

    reference = np.mean(aa_bins[0])
    ratios = [np.mean(column) / reference for column in aa_bins]
    return [np.log(r) if r != 0.0 else 0.0 for r in ratios]
# Per-PDB file names for the designed alignments at fixed design temperatures
# (0.0, 0.1, 0.3, 0.6, 0.9, 1.2 and 0.03) plus two natural subsamples.
# pdb_id, chain_id, natural_proteins and all the *_values accumulator lists
# come from an enclosing scope outside this span.
designed_proteins_00 = "align_data_array_" + pdb_id + "_" + chain_id + "_" + str(0.0) + ".dat"
designed_proteins_01 = "align_data_array_" + pdb_id + "_" + chain_id + "_" + str(0.1) + ".dat"
designed_proteins_03 = "align_data_array_" + pdb_id + "_" + chain_id + "_" + str(0.3) + ".dat"
designed_proteins_06 = "align_data_array_" + pdb_id + "_" + chain_id + "_" + str(0.6) + ".dat"
designed_proteins_09 = "align_data_array_" + pdb_id + "_" + chain_id + "_" + str(0.9) + ".dat"
designed_proteins_12 = "align_data_array_" + pdb_id + "_" + chain_id + "_" + str(1.2) + ".dat"
designed_proteins_003 = "align_data_array_" + pdb_id + "_" + chain_id + "_" + str(0.03) + ".dat"
split_natural_1 = "align_natural_sample1_data_array_" + pdb_id + "_" + chain_id + ".dat"
split_natural_2 = "align_natural_sample2_data_array_" + pdb_id + "_" + chain_id + ".dat"
#Calculates all of the data for comparison (ex. entropy)
natural_distribution = analysis_functions.get_AA_distribution(natural_proteins)
natural_entropy = analysis_functions.get_native_entropy(natural_proteins)
natural_entropy_array = analysis_functions.make_array(natural_entropy)
natural_RSA = analysis_functions.get_RSA_Values(natural_proteins)
natural_RSA_array = analysis_functions.make_array(natural_RSA)
# Per-PDB means accumulate in the externally defined lists.
natural_mean_RSA_values.append(mean(natural_RSA_array))
natural_mean_entropy_values.append(mean(natural_entropy_array))
# Same pipeline repeated for the temperature-0.0 design.
designed_distribution_00 = analysis_functions.get_AA_distribution(designed_proteins_00)
designed_entropy_00 = analysis_functions.get_native_entropy(designed_proteins_00)
designed_entropy_array_00 = analysis_functions.make_array(designed_entropy_00)
designed_RSA_00 = analysis_functions.get_RSA_Values(designed_proteins_00)
designed_RSA_array_00 = analysis_functions.make_array(designed_RSA_00)
designed_mean_RSA_values_00.append(mean(designed_RSA_array_00))
designed_mean_entropy_values_00.append(mean(designed_entropy_array_00))
# Temperature-0.1 design follows the same pattern (continues past this span).
designed_distribution_01 = analysis_functions.get_AA_distribution(designed_proteins_01)