Example #1
 def test_load_filename_errors_2(self, filename, expects, datadir):
     with pytest.raises(expects):
         load_peaks(datadir / filename)
Example #2
    def test_load_peaks(self, filtered_peak_list, datadir, outputdir,
                        peak_list_filename):
        loaded_peak_list = load_peaks(peak_list_filename)

        assert loaded_peak_list == filtered_peak_list
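Example #2 exercises the round trip: a peak list written to disk should load back equal to the reference list. The fixtures it consumes are not shown in these examples; a hedged sketch of what `peak_list_filename` might look like, assuming `outputdir` is a pathlib.Path fixture and `dump_peaks` is the (unshown) counterpart to `load_peaks`:

import pytest

# Hypothetical wiring; dump_peaks and the other fixtures are assumptions,
# not shown in the excerpts above.
@pytest.fixture
def peak_list_filename(filtered_peak_list, outputdir):
    filename = outputdir / "peak_list.dat"
    dump_peaks(filtered_peak_list, filename)  # assumed serialiser
    return filename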
Example #3
 def test_load_filename_errors_1(self, filename):
     with pytest.raises(TypeError):
         load_peaks(filename)
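Examples #1 and #3 are clearly parametrized tests: something supplies `filename` (and, for #1, `expects`) on each run, and a `datadir` fixture points at the reference data. The parameter sets and conftest are not shown; a minimal sketch of how they might be wired, with hypothetical filenames, data paths and expected exceptions:

import pathlib

import pytest

# load_peaks would be imported from the module under test (path not shown here)


@pytest.fixture
def datadir():
    # Hypothetical location of the reference data files
    return pathlib.Path(__file__).parent / "data"


@pytest.mark.parametrize("filename, expects", [
    ("missing_file.dat", FileNotFoundError),   # hypothetical case
    ("truncated_file.dat", ValueError),        # hypothetical case
])
def test_load_filename_errors_2(filename, expects, datadir):
    with pytest.raises(expects):
        load_peaks(datadir / filename)


@pytest.mark.parametrize("filename", [123, None, 45.6])  # non-path inputs
def test_load_filename_errors_1(filename):
    with pytest.raises(TypeError):
        load_peaks(filename)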
Example #4
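	# NB: this method assumes module-level imports of os, csv, operator and time,
	# plus load_peaks; create_msp() and nist_ms_comparison() are defined elsewhere
	# on the same class.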
	def qualitative_processing(self, sample_name, rt_list, n_peaks=80):
		"""
		
		
		:param sample_name:
		:type sample_name:
		:param rt_list:
		:type rt_list:
		:param n_peaks: Number of peaks to include in the report
		:type n_peaks: int
		
		:return:
		:rtype:
		"""
		
		# Initialise variables
		time_list = []
		intensity_list = []
		
		# Load saved TIC
		with open(os.path.join(self.config.expr_dir, "{}_tic.dat".format(sample_name))) as tic_file:
			ticreader = csv.reader(tic_file, delimiter=" ")
			for row in ticreader:
				row = list(filter(None, row))  # drop empty fields caused by repeated spaces
				intensity_list.append(float(row[1]))
				time_list.append(float(row[0]))
		
		# tic = IonChromatogram(numpy.array(intensity_list), time_list)
		
		# Load the peak list from file
		peak_list = load_peaks(os.path.join(self.config.expr_dir, "{}_peaks.dat".format(sample_name)))
		
		# Obtain the area for each peak (peak_area_list is not used again in this method)
		peak_area_list = []
		for peak in peak_list:
			area = peak.get_area()
			peak_area_list.append(area)
		
		# Write output to CSV file
		combined_csv_file = os.path.join(self.config.csv_dir, "{}_COMBINED.csv".format(sample_name))
		with open(combined_csv_file, "w") as combine_csv:
			
			# Sample name and header row
			combine_csv.write(f"{sample_name}\n")
			combine_csv.write("Retention Time;Peak Area;;Lib;Match;R Match;Name;CAS Number;Notes\n")
		
			report_buffer = []
			# Filter the peak list to those peaks whose retention times (in minutes) appear in rt_list
			for peak in peak_list:
				# if str(rounders(peak.get_rt()/60,"0.000")) in rt_list:
				# print(peak.get_rt()/60.0)
				# TODO: there is a simpler way to do this as part of the DPA functions
				# 	DDF 20/11/19
				if peak.get_rt() / 60.0 in rt_list:  # NB: exact float comparison against rt_list
					report_buffer.append([
						'',
						# rounders(peak.get_rt()/60,"0.000"),
						(peak.get_rt() / 60),
						'',
						peak.get_mass_spectrum(),
						# '{:,}'.format(rounders(peak.get_area()/60,"0.0"))
						'{:,}'.format(peak.get_area() / 60)
					])
			
			# TODO: I thought this was supposed to filter to show the 80 largest peaks,
			# 	but I'm not sure it actually does that
			# 	DDF 20/11/19
			
			# Reverse list order
			report_buffer = report_buffer[::-1]
			# Keep the last n_peaks peaks (the first n_peaks of the reversed list)
			report_buffer = report_buffer[:n_peaks]
			# Sort by retention time
			report_buffer.sort(key=operator.itemgetter(1))
			
			# Iterate over peaks
			for row_idx, row in enumerate(report_buffer):
				# TODO: some tidying up here; is writing to disk the most efficient?
				# 	DDF 20/11/19
				
				# if row_idx == 19:
				
				# Get mass spectrum
				ms = row[3]
				
				# Number of hits to get from NIST MS Search
				n_hits = 10
				
				# Create MSP file for the peak
				self.create_msp("{}_{}".format(sample_name, row[1]), ms.mass_list, ms.mass_spec)
				
				matches_dict = self.nist_ms_comparison(
					"{}_{}".format(sample_name, row[1]),
					ms.mass_list, ms.mass_spec, n_hits
				)
				
				combine_csv.write("{};{};Page {} of 80;;;;;;{}\n".format(row[1], row[4], row_idx + 1, row[2]))
				
				for hit in range(1, n_hits + 1):
					hit_data = matches_dict["Hit{}".format(hit)]
					combine_csv.write(';;{};{};{};{};{};{};\n'.format(
						hit,
						hit_data["Lib"],
						hit_data["MF"],
						hit_data["RMF"],
						hit_data["Name"].replace(";", ":"),  # a ';' in the name would break the delimiter
						hit_data["CAS"],
					))
				
				time.sleep(2)  # pause between peaks, presumably to avoid overloading NIST MS Search
				
		return 0
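For reference, a hedged sketch of how this method might be invoked. The owning class and its construction are not shown above, so the names here are assumptions:

# `analysis` stands in for an instance of the (unshown) class that defines
# qualitative_processing(), with config.expr_dir and config.csv_dir populated.
rt_list = [3.142, 5.678]  # retention times in minutes, matching peak.get_rt() / 60.0
analysis.qualitative_processing("SAMPLE_01", rt_list, n_peaks=80)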