def test_runuis_nonswath_rangetree(self):
    self.assertEqual(len(self.precursors_to_evaluate), 905)
    par = self.par
    cursor = self.db.cursor()

    # If we don't use the DB, we use the rangetree to query and get our list of
    # precursors that are interfering. In SWATH we don't include a +/- q1_window
    # around our range of precursors because the precursor window is fixed to
    # (min_q1, max_q1) and no other precursors are considered.
    self.myprecursors.getFromDB(par, cursor, self.min_q1 - par.q1_window, self.max_q1 + par.q1_window)
    rtree = self.myprecursors.build_rangetree()

    prepare = []
    for precursor in self.precursors_to_evaluate:
        q3_low, q3_high = par.get_q3range_transitions()
        transitions = precursor.calculate_transitions(q3_low, q3_high)
        nr_transitions = len(transitions)

        # Use the rangetree, whether it is SWATH or not
        collisions_per_peptide = self.myprecursors.get_collisions_per_peptide_from_rangetree(
            precursor, precursor.q1 - par.q1_window, precursor.q1 + par.q1_window,
            transitions, par, rtree)
        non_uis_list = collider.get_nonuis_list(collisions_per_peptide, par.max_uis)

        for order in range(1, min(par.max_uis + 1, nr_transitions + 1)):
            prepare.append((len(non_uis_list[order]), collider.choose(nr_transitions, order),
                            precursor.parent_id, order, -1))

    self.assertEqual(len(prepare), 905 * par.max_uis)
    self.assertEqual(prepare[0], (0, 17.0, 1, 1, -1))
    final_report = self.get_final_report(par, prepare)
    self.check_final_report_nonswath(final_report)
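# Each tuple appended to `prepare` above is (number of non-useable transition
# combinations of this order, total number of combinations, parent_id, order,
# exp_key).  Below is a minimal, pure-Python sketch (not part of the original
# suite) of that bookkeeping, assuming the values of collisions_per_peptide are
# the sets of transition indices shadowed by one interfering peptide; the
# production code uses collider.get_nonuis_list / c_getnonuis instead.
def _sketch_nonuis_per_order(collisions_per_peptide, max_uis):
    from itertools import combinations
    non_uis_list = [set() for _ in range(max_uis + 1)]
    for shadowed in collisions_per_peptide.values():
        for order in range(1, max_uis + 1):
            # every order-sized subset of one shadowed set is non-unique
            non_uis_list[order].update(combinations(sorted(shadowed), order))
    return non_uis_list

# Illustrative numbers only: two interfering peptides shadowing transitions
# {0, 1} and {1, 2} of a 5-transition assay give 2 non-useable pairs at order
# 2, leaving choose(5, 2) - 2 = 8 useable pairs.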
def test_runuis_nonswath(self):
    self.assertEqual(len(self.precursors_to_evaluate), 905)
    par = self.par
    cursor = self.db.cursor()

    prepare = []
    for precursor in self.precursors_to_evaluate:
        q3_low, q3_high = par.get_q3range_transitions()
        transitions = precursor.calculate_transitions(q3_low, q3_high)
        nr_transitions = len(transitions)

        precursors_obj = self.acollider._get_all_precursors(par, precursor, cursor)
        collisions_per_peptide = collider.get_coll_per_peptide_from_precursors(
            self.acollider, transitions, precursors_obj, par, precursor)
        non_uis_list = collider.get_nonuis_list(collisions_per_peptide, par.max_uis)

        for order in range(1, min(par.max_uis + 1, nr_transitions + 1)):
            prepare.append((len(non_uis_list[order]), collider.choose(nr_transitions, order),
                            precursor.parent_id, order, -1))

    self.assertEqual(len(prepare), 905 * par.max_uis)
    self.assertEqual(prepare[0], (0, 17.0, 1, 1, -1))
    final_report = self.get_final_report(par, prepare)
    self.check_final_report_nonswath(final_report)
def test_runuis_swath(self):
    self.assertEqual(len(self.precursors_to_evaluate), 905)
    swath_mode = False
    par = self.par
    R = self.R
    cursor = self.db.cursor()
    prepare = []

    self.min_q1 = 500
    self.max_q1 = 525

    # Get the precursors (now for 500-525 instead of the full range)
    ###########################################################################
    myprecursors = Precursors()
    cursor = self.db.cursor()
    myprecursors.getFromDB(par, cursor, self.min_q1 - par.q1_window, self.max_q1 + par.q1_window)
    rtree = myprecursors.build_rangetree()
    self.precursors_to_evaluate = myprecursors.getPrecursorsToEvaluate(self.min_q1, self.max_q1)
    self.assertEqual(len(self.precursors_to_evaluate), 39)

    isotope_correction = par.isotopes_up_to * R.mass_diffC13 / min(par.parent_charges)
    temp_precursors = Precursors()
    temp_precursors.getFromDB(par, self.db.cursor(), self.min_q1 - isotope_correction, self.max_q1)
    all_swath_precursors = []
    for p in temp_precursors.precursors:
        if p.included_in_isotopic_range(self.min_q1, self.max_q1, par):
            all_swath_precursors.append(p)

    for precursor in self.precursors_to_evaluate:
        q3_low, q3_high = par.get_q3range_transitions()
        transitions = precursor.calculate_transitions(q3_low, q3_high)
        nr_transitions = len(transitions)

        if par.ssrcalc_window > 1000:
            precursors_obj = [p for p in all_swath_precursors
                              if p.transition_group != precursor.transition_group]
        else:
            ssrcalc_low = precursor.ssrcalc - par.ssrcalc_window
            ssrcalc_high = precursor.ssrcalc + par.ssrcalc_window
            precursors_obj = [p for p in all_swath_precursors
                              if p.transition_group != precursor.transition_group
                              and p.ssrcalc > ssrcalc_low and p.ssrcalc < ssrcalc_high]

        collisions_per_peptide = collider.get_coll_per_peptide_from_precursors(
            self.acollider, transitions, precursors_obj, par, precursor)
        non_uis_list = collider.get_nonuis_list(collisions_per_peptide, par.max_uis)

        for order in range(1, min(par.max_uis + 1, nr_transitions + 1)):
            prepare.append((len(non_uis_list[order]), collider.choose(nr_transitions, order),
                            precursor.parent_id, order, -1))

    self.assertEqual(len(prepare), 39 * par.max_uis)
    self.assertEqual(prepare[0], (5, 8.0, 69, 1, -1))
    final_report = self.get_final_report(par, prepare)
    self.check_final_report_swath(final_report)
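# The precursor query above widens its lower bound by
#   isotope_correction = isotopes_up_to * mass_diffC13 / min(parent_charges),
# the largest m/z shift an isotopic peak can introduce at the lowest considered
# charge state, so that monoisotopic precursors whose isotopes fall into
# [min_q1, max_q1] are still retrieved and then filtered with
# included_in_isotopic_range().  A minimal numeric sketch with assumed values
# (not taken from the test fixture):
def _sketch_isotope_correction(isotopes_up_to=3, mass_diffC13=1.0033548, min_charge=2):
    # e.g. 3 * 1.0033548 / 2 ~= 1.505 m/z below min_q1
    return isotopes_up_to * mass_diffC13 / min_charge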
def test_choose(self):
    self.assertEqual(10, collider.choose(5, 2))
    self.assertEqual(10, collider.choose(5, 3))
    self.assertEqual(45, collider.choose(10, 2))
    self.assertEqual(120, collider.choose(10, 3))
    self.assertEqual(210, collider.choose(10, 4))
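# collider.choose(n, k) is the binomial coefficient C(n, k) = n! / (k! (n-k)!),
# i.e. the number of distinct k-transition combinations that can be picked from
# n transitions; the expected values above are C(5,2) = C(5,3) = 10,
# C(10,2) = 45, C(10,3) = 120 and C(10,4) = 210.  A minimal reference sketch
# (not the production implementation, which lives in the collider module):
def _sketch_choose(n, k):
    from math import factorial
    if k > n:
        return 0
    return factorial(n) // (factorial(k) * factorial(n - k))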
elif not use_experimental_height:
    # We don't have experimental height data and cannot use C++ code
    collisions_per_peptide = collider.get_coll_per_peptide(mycollider, transitions, par, peptide_obj, cursor)
    min_needed = mycollider._sub_getMinNeededTransitions(par, transitions, collisions_per_peptide)
    #min_needed = mycollider.getMinNeededTransitions_direct(par, transitions, precursors)
else:
    # Here we consider the case that we have measured a number of
    # transitions experimentally and want to know how many of them are
    # sufficient to establish uniqueness. For this, all we need is
    # that one tuple of transitions establishes uniqueness since we
    # were able to measure it above the background noise.
    collisions_per_peptide = collider.get_coll_per_peptide(mycollider, transitions, par, pep, cursor)
    for order in range(1, nr_transitions + 1):
        mymax = collider.choose(nr_transitions, order)
        if use_cpp:
            non_uis = c_getnonuis.get_non_uis(collisions_per_peptide, order)
        else:
            non_uis = set()
            for pepc in collisions_per_peptide.values():
                get_non_uis(pepc, non_uis, order)
        if len(non_uis) < mymax:
            break

    if len(non_uis) < mymax:
        min_needed = order
    else:
        min_needed = -1

spectrum.score = min_needed * nr_transitions
spectrum.min_needed = min_needed
if min_needed != -1:
    spectrum.score = nr_transitions - min_needed

if not par.quiet:
    progressm.update(1)

get_min_tr_time += time.time() - tmp_time
tmp_time = time.time()
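# The pure-Python fallback above calls a get_non_uis() helper whose body is not
# shown in this excerpt.  A minimal sketch of what such a helper presumably
# does, assuming `pepc` is the collection of transitions shadowed by one
# interfering peptide: every `order`-sized subset of it is a combination that
# cannot establish uniqueness and is added to the running `non_uis` set.  As
# soon as len(non_uis) < choose(nr_transitions, order), at least one
# combination of that order is unique and `order` transitions suffice.
def _sketch_get_non_uis(pepc, non_uis, order):
    from itertools import combinations
    non_uis.update(combinations(pepc, order))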
"qadd": qadd, } print q cursor.execute(q) print "Obtained all peptides" alltuples = list(cursor.fetchall()) progressm = progress.ProgressMeter(total=len(alltuples), unit="peptides") prepare = [] total_transitions = 0 total_assays = 0 for kk, pep in enumerate(alltuples): q3_low, q3_high = [400, 1200] q3_low, q3_high = [400, 1400] q3charges = [1, 2] q3charges = [1] # q3_low, q3_high = [0, 12000] # # new way to calculate the precursors transitions = c_getnonuis.calculate_transitions_ch(((-2, pep[0], -1),), q3charges, q3_low, q3_high) # print pep, len(pep[0]), len(transitions) total_transitions += len(transitions) total_assays += sum([collider.choose(len(transitions), i) for i in range(1, 6) if len(transitions) >= i]) progressm.update(1) print "Total number of precursors is ", len(alltuples) print "Total number of transitions is ", total_transitions print "Total number of assays is ", total_assays
print "Obtained all peptides" alltuples = list(cursor.fetchall() ) progressm = progress.ProgressMeter(total=len(alltuples), unit='peptides') prepare = [] total_transitions = 0 total_assays = 0 for kk, pep in enumerate(alltuples): q3_low, q3_high = [400, 1200] q3_low, q3_high = [400, 1400] q3charges = [1,2] q3charges = [1] #q3_low, q3_high = [0, 12000] # #new way to calculate the precursors transitions = c_getnonuis.calculate_transitions_ch( ((-2, pep[0], -1),), q3charges, q3_low, q3_high) #print pep, len(pep[0]), len(transitions) total_transitions += len(transitions) total_assays += sum([ collider.choose(len(transitions), i) for i in range(1,6) if len(transitions) >= i ] ) progressm.update(1) print "Total number of precursors is ", len(alltuples) print "Total number of transitions is ", total_transitions print "Total number of assays is ", total_assays
        precursor, precursor.q1 - par.q1_window, precursor.q1 + par.q1_window,
        transitions, par, rtree)
    non_uis_list = collider.get_nonuis_list(collisions_per_peptide, MAX_UIS)

    ##
    ## Let's count the number of peptides that interfere
    if count_avg_transitions:
        tr_arr = [0 for i in range(nr_transitions)]
        for v in collisions_per_peptide.values():
            for vv in v:
                tr_arr[vv] += 1
        allintertr.extend(tr_arr)
    ##

    for order in range(1, min(MAX_UIS + 1, nr_transitions + 1)):
        prepare.append((len(non_uis_list[order]), collider.choose(nr_transitions, order),
                        precursor.parent_id, order, exp_key))
    progressm.update(1)

if count_avg_transitions:
    print "\n"
    print "found %s transitions" % len(allintertr)
    print "found max of %s interferences" % max(allintertr)
    print "found average of %s interferences" % (sum(allintertr) * 1.0 / len(allintertr))

# if any problems with the packet/buffer length occur, try this:
## set global max_allowed_packet=1000000000;
## set global net_buffer_length=1000000;
# cursor.executemany('insert into %s' % restable + ' (non_useable_UIS, total_UIS, \
#     parent_key, uisorder, exp_key) values (%s,%s,%s,%s,%s)', prepare)

print "Analyzed %s peptides" % len(precursors_to_evaluate)
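# A minimal sketch of the per-transition interference count above, assuming the
# values of collisions_per_peptide are lists of shadowed transition indices
# (illustrative values only, not from the real data): two interfering peptides
# that shadow transitions [0, 1] and [1] of a 3-transition assay yield
# tr_arr == [1, 2, 0], i.e. transition 1 is hit twice and transition 2 never;
# those counts are collected in allintertr and summarised after the loop.
def _sketch_interference_counts(collisions_per_peptide, nr_transitions):
    tr_arr = [0] * nr_transitions
    for shadowed in collisions_per_peptide.values():
        for idx in shadowed:
            tr_arr[idx] += 1
    return tr_arr
# _sketch_interference_counts({1: [0, 1], 2: [1]}, 3) == [1, 2, 0]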
###############################################################
# Strike 1: it has to be a global UIS
computed_collisions = myprecursors.get_collisions_per_peptide_from_rangetree(
    precursor, precursor.q1 - par.q1_window, precursor.q1 + par.q1_window,
    transitions, par, rtree)
collisions_per_peptide = computed_collisions

# see SRMCollider::Combinatorics::get_non_uis
non_useable_combinations = c_getnonuis.get_non_uis(collisions_per_peptide, myorder)
srm_ids = [t[1] for t in transitions]
tuples_strike1 = 0
if not nr_transitions < myorder:
    tuples_strike1 = collider.choose(nr_transitions, myorder) - len(non_useable_combinations)

###############################################################
# Strike 2: it has to be locally clean
if not skip_strike2:
    ssrcalc_low = ssrcalc - par.ssrcalc_window + 0.001
    ssrcalc_high = ssrcalc + par.ssrcalc_window - 0.001
    precursor_ids = tuple(c_rangetree.query_tree(q1_low, ssrcalc_low, q1_high, ssrcalc_high))
    precursors = tuple([parentid_lookup[myid[0]] for myid in precursor_ids
                        # don't select myself
                        if parentid_lookup[myid[0]][2] != pep['transition_group']])

    # collisions_per_peptide: dictionary; for each key the set of interfering transitions is stored
    collisions_per_peptide = c_getnonuis.calculate_collisions_per_peptide_other_ion_series(
        transitions, precursors, par, q3_low, q3_high, par.q3_window, par.ppm, forceChargeCheck)
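# Strike 1 above counts how many transition tuples of size `myorder` remain
# globally informative: all choose(nr_transitions, myorder) candidate tuples
# minus the non-useable ones in which every member is shadowed by the same
# interfering peptide.  A small worked example with assumed numbers: for 6
# transitions at order 2 there are choose(6, 2) = 15 candidate pairs; if
# get_non_uis reports 4 non-useable pairs, tuples_strike1 = 15 - 4 = 11.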
for precursor in precursors_to_evaluate:
    transitions = precursor.calculate_transitions_from_param(par)

    # correct rounding errors, s.t. we get the same results as before!
    ssrcalc_low = precursor.ssrcalc - par.ssrcalc_window + 0.001
    ssrcalc_high = precursor.ssrcalc + par.ssrcalc_window - 0.001

    try:
        result = c_integrated.wrap_all_bitwise(transitions,
            precursor.q1 - par.q1_window, ssrcalc_low,
            precursor.q1 + par.q1_window, ssrcalc_high,
            precursor.transition_group, min(par.max_uis, len(transitions)),
            par.q3_window, par.ppm, par.isotopes_up_to, isotope_correction, par, r_tree)
    except ValueError:
        print "Too many transitions for", precursor.modification
        continue

    for order in range(1, min(par.max_uis + 1, len(transitions) + 1)):
        prepare.append((result[order - 1], collider.choose(len(transitions), order),
                        precursor.parent_id, order, exp_key))
    #//break;
    progressm.update(1)

for order in range(1, 6):
    sum_all = sum([p[0] * 1.0 / p[1] for p in prepare if p[3] == order])
    nr_peptides = len([p for p in prepare if p[3] == order])
    if not par.quiet and not nr_peptides == 0:
        print "Order %s, Average non useable UIS %s" % (order, sum_all * 1.0 / nr_peptides)
    #cursor.execute("insert into hroest.result_completegraph_aggr (sum_nonUIS, nr_peptides, uisorder, experiment) VALUES (%s,%s,%s,'%s')" % (sum_all, nr_peptides, order, exp_key))

"""
create table hroest.result_completegraph (
    exp_key int(11),
    parent_key int(11),
    non_useable_UIS int(11),