def run_ff(self, exp: ms.MSExperiment, type: str = 'centroided') -> ms.FeatureMap:
    """Runs an existing OpenMS feature finder on an experiment.

    Keyword arguments:
    exp: the experiment to run the existing feature finder on
    type: the name of the existing feature finder to run

    Returns: the features in the experiment.
    """
    if type == 'multiplex':
        return self.run_ffm(exp)

    ff = ms.FeatureFinder()
    ff.setLogType(ms.LogType.NONE)
    features, seeds = ms.FeatureMap(), ms.FeatureMap()

    params = ms.FeatureFinder().getParameters(type)  # default (Leon's) (modified)
    params[b'mass_trace:min_spectra'] = 7  # 10 (5) (7)
    params[b'mass_trace:max_missing'] = 1  # 1 (2) (1)
    params[b'seed:min_score'] = 0.65  # 0.8 (0.5) (0.65)
    params[b'feature:min_score'] = 0.6  # 0.7 (0.5) (0.6)

    exp.updateRanges()
    ff.run(type, exp, features, params, seeds)
    features.setUniqueIds()
    return features
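# A minimal usage sketch for run_ff (not part of the original class): `finder`
# is assumed to be an instance of the class that defines run_ff and run_ffm,
# pyopenms is assumed to be imported as ms, and 'sample.mzML' is a placeholder.
def example_run_ff(finder, path='sample.mzML'):
    exp = ms.MSExperiment()
    ms.MzMLFile().load(path, exp)
    # 'centroided' selects FeatureFinderCentroided; 'multiplex' would dispatch
    # to finder.run_ffm() instead.
    return finder.run_ff(exp, type='centroided')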
def load_feature_maps(self, **kwargs):
    self.reference = oms.FeatureMap()
    self.toAlign = oms.FeatureMap()
    self.xml_file = oms.FeatureXMLFile()
    self.xml_file.load(self.input_fm_1, self.reference)
    self.xml_file.load(self.input_fm_2, self.toAlign)
def main(self):
    # After the path-parsing method we have self.src_full_name_list.
    for f in get_list_full_names(self.src):
        print("Map Alignment implementation")
        print("Source file:", f)
        # Prepare (init) an empty list and entity.
        self.init_entity(**self.kw)

        # Load the reference map and the map to align.
        self.reference_map = oms.FeatureMap()
        self.toAlign_map = oms.FeatureMap()
        oms.FeatureXMLFile().load(self.reference_file, self.reference_map)
        oms.FeatureXMLFile().load(f, self.toAlign_map)

        # Set the reference map.
        self.ma.entity.setReference(self.reference_map)

        # Step 3: create an object for the computed transformation.
        transformation = oms.TransformationDescription()

        # Step 4: align the map.
        self.ma.entity.align(self.toAlign_map, transformation)

        # Step 5: store only the aligned map; a second store() to the same
        # path would overwrite it.
        self.dst_full_file_name = os.path.join(
            self.dst,
            convert_src_to_dst_file_name(f, self.dst, self.suffix_dst_files,
                                         self.ext_dst_files))
        oms.FeatureXMLFile().store(self.dst_full_file_name, self.toAlign_map)
        print("Aligned data stored into:", self.dst_full_file_name)
def run_feature_finder_centroided_on_experiment(input_map):
    """Function that runs FeatureFinderCentroided on the given input map.

    Args:
        input_map (MSExperiment): An OpenMS MSExperiment object.

    Returns:
        FeatureMap: A FeatureMap containing the found features from the
        given experiment.
    """
    # Update the range information of the experiment.
    input_map.updateRanges()

    ff = ms.FeatureFinder()
    ff.setLogType(ms.LogType.CMD)

    # Run the feature finder.
    name = 'centroided'
    features = ms.FeatureMap()
    seeds = ms.FeatureMap()
    params = ms.FeatureFinder().getParameters(name)
    params[b'mass_trace:min_spectra'] = 5
    params[b'mass_trace:max_missing'] = 2
    params[b'seed:min_score'] = 0.5
    params[b'feature:min_score'] = 0.5
    ff.run(name, input_map, features, params, seeds)
    features.setUniqueIds()
    return features
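# A minimal usage sketch (assumptions: pyopenms imported as ms, 'input.mzML'
# is a placeholder path) showing how an experiment would be loaded and passed in.
input_map = ms.MSExperiment()
ms.MzMLFile().load('input.mzML', input_map)
found = run_feature_finder_centroided_on_experiment(input_map)
print(found.size(), "features found")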
def setup_feature_finder(self):
    # Set up the FeatureFinder.
    self.seeds = oms.FeatureMap()
    self.ff = oms.FeatureFinder()
    self.features = oms.FeatureMap()
    self.ff.setLogType(oms.LogType.CMD)
def detect_peaks_gcms_centroid(ms_experiment, parameters, debug=False):
    """
    Applicable to centroided experiments; also see
    https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/a16103.html

    :param ms_experiment:
    :param parameters:
    :return:
    """
    print(f"Detecting peaks with {GCMSPeakDetectionMethod.CENTROIDED}")
    ff = oms.FeatureFinder()
    if not debug:
        ff.setLogType(oms.LogType.NONE)
    else:
        ff.setLogType(oms.LogType.CMD)

    # Run the feature finder.
    name = "centroided"
    pdm_name = GCMSPeakDetectionMethod.CENTROIDED.name
    parameters['detection_mode'] = name
    parameters['pdm_name'] = pdm_name
    features = oms.FeatureMap()
    seeds = oms.FeatureMap()
    ff_params = oms.FeatureFinder().getParameters(name)
    ff.run(name, ms_experiment, features, ff_params, seeds)
    features.ensureUniqueId()

    # Store the features next to the input file.
    fh = oms.FeatureXMLFile()
    feature_storage_path = f"{parameters['input_filename']}_output.featureXML"
    fh.store(feature_storage_path, features)
    parameters['feature_storage'] = feature_storage_path
    print("Found", features.size(), "features")
    return parameters
def detect_peaks_gcms_peak_picker_wavelet(ms_experiment, parameters):
    """
    Use the isotope wavelet to process raw data - can perform poorly on
    centroided data.
    https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/a16159.html
    TODO use "corrected" intensity_type
    TODO set estimate_peak_width to true

    :param ms_experiment:
    :param parameters:
    :return:
    """
    # Run the feature finder.
    name = "peak_picker_wavelet"
    parameters['detection_mode'] = name

    # Outdated reference code:
    # https://github.com/OpenMS/pyopenms-extra/blob/master/src/examples/peakpicker_scipyFFT.py
    ff = oms.FeatureFinder()
    ff.setLogType(oms.LogType.NONE)
    algo_name = oms.FeatureFinderAlgorithmIsotopeWavelet().getProductName()
    feature_map = oms.FeatureMap()
    seeds = oms.FeatureMap()
    algo_params = ff.getParameters(algo_name)
    ff.run(algo_name, ms_experiment, feature_map, algo_params, seeds)
    feature_map.setUniqueIds()
    parameters = default_store_feature_xml(feature_map, parameters)
    return parameters
def algorithm(exp, targeted, picker):
    output = pyopenms.FeatureMap()

    # Index chromatograms, peptides and transitions for fast lookup.
    chrom_map = {}
    pepmap = {}
    trmap = {}
    for i, chrom in enumerate(exp.getChromatograms()):
        chrom_map[chrom.getNativeID()] = i
    for i, pep in enumerate(targeted.getPeptides()):
        pepmap[pep.id] = i
    for i, tr in enumerate(targeted.getTransitions()):
        tmp = trmap.get(tr.getPeptideRef(), [])
        tmp.append(i)
        trmap[tr.getPeptideRef()] = tmp

    for key, value in trmap.items():
        print(key, value)
        transition_group = getTransitionGroup(exp, targeted, key, value, chrom_map)
        picker.pickTransitionGroup(transition_group)
        for mrmfeature in transition_group.getFeatures():
            features = mrmfeature.getFeatures()
            for f in features:
                # TODO
                # f.getConvexHulls().clear()
                f.ensureUniqueId()
            # Add all the subfeatures as subordinates.
            mrmfeature.setSubordinates(features)
            output.push_back(mrmfeature)
    return output
def test_run_mrmfeaturefinder(self):
    # Load chromatograms.
    chromatograms = pyopenms.MSExperiment()
    fh = pyopenms.FileHandler()
    fh.loadExperiment(self.chromatograms, chromatograms)

    # Load the TraML file.
    targeted = pyopenms.TargetedExperiment()
    tramlfile = pyopenms.TraMLFile()
    tramlfile.load(self.tramlfile, targeted)

    # Create empty objects as input and finally as output.
    empty_swath = pyopenms.MSExperiment()
    trafo = pyopenms.TransformationDescription()
    output = pyopenms.FeatureMap()

    # Set up the feature finder and run it.
    featurefinder = pyopenms.MRMFeatureFinderScoring()
    featurefinder.pickExperiment(chromatograms, output, targeted, trafo,
                                 empty_swath)

    self.assertAlmostEqual(output.size(), 3)
    self.assertAlmostEqual(output[0].getRT(), 3119.092041015, eps)
    self.assertAlmostEqual(output[0].getIntensity(), 3614.99755859375, eps)
    self.assertAlmostEqual(output[0].getMetaValue(b"var_xcorr_shape_weighted"),
                           0.997577965259552, eps)
    self.assertAlmostEqual(output[0].getMetaValue(b"sn_ratio"),
                           86.00413513183594, eps)
def main(self):
    # After the path-parsing method we have self.src_full_name_list.
    print("FeatureFindingMetabo implementation")
    for f in get_list_full_names(self.src):
        print("Source file:", f)
        # Prepare (init) an empty list and entity.
        self.init_entity(**self.kw)

        # Step 1: load the map.
        input_map = oms.PeakMap()
        fm = oms.FeatureMap()
        oms.MzMLFile().load(f, input_map)

        # Step 2: apply FFM (mass trace detection, elution peak detection,
        # feature finding).
        self.mtd.entity.run(input_map, self.output_mt)
        self.epd.entity.detectPeaks(self.output_mt, self.splitted_mt)
        self.ffm.entity.run(self.splitted_mt, fm, self.filtered_mt)

        # Step 3: store the result into a file.
        dst_full_file_name = os.path.join(
            self.dst,
            convert_src_to_dst_file_name(f, self.dst, self.suffix_dst_files,
                                         self.ext_dst_files))
        oms.FeatureXMLFile().store(dst_full_file_name, fm)
        print("Features stored into:", dst_full_file_name)
def algorithm(chromatograms, targeted):
    # Create empty objects as input and finally as output.
    empty_swath = pyopenms.MSExperiment()
    trafo = pyopenms.TransformationDescription()
    output = pyopenms.FeatureMap()

    # Set up the feature finder; disable the RT score since no normalized
    # retention times exist yet.
    featurefinder = pyopenms.MRMFeatureFinderScoring()
    scoring_params = pyopenms.MRMFeatureFinderScoring().getDefaults()
    scoring_params.setValue("Scores:use_rt_score", 'false', '')
    featurefinder.setParameters(scoring_params)
    featurefinder.pickExperiment(chromatograms, output, targeted, trafo,
                                 empty_swath)

    # Get the RT pairs and remove outliers.
    pairs = []
    simple_find_best_feature(output, pairs, targeted)
    pairs_corrected = pyopenms.MRMRTNormalizer().rm_outliers(pairs, 0.95, 0.6)
    pairs_corrected = [list(p) for p in pairs_corrected]

    # Store the transformation, using a linear model as default.
    trafo_out = pyopenms.TransformationDescription()
    trafo_out.setDataPoints(pairs_corrected)
    model_params = pyopenms.Param()
    model_params.setValue("symmetric_regression", 'false', '')
    model_type = "linear"
    trafo_out.fitModel(model_type, model_params)
    return trafo_out
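# A hedged usage sketch: the returned TransformationDescription maps observed
# retention times to normalized ones, and apply() evaluates the fitted linear
# model; `chromatograms` and `targeted` are assumed to be loaded as elsewhere
# in this collection.
trafo = algorithm(chromatograms, targeted)
print("RT 1500.0 maps to", trafo.apply(1500.0))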
def algorithm(exp, targeted, picker, scorer, trafo):
    output = pyopenms.FeatureMap()
    scorer.prepareProteinPeptideMaps_(targeted)

    # Index chromatograms, compounds and transitions for fast lookup.
    chrom_map = {}
    pepmap = {}
    trmap = {}
    for i, chrom in enumerate(exp.getChromatograms()):
        chrom_map[chrom.getNativeID()] = i
    for i, pep in enumerate(targeted.getCompounds()):
        pepmap[pep.id] = i
    for i, tr in enumerate(targeted.getTransitions()):
        tmp = trmap.get(tr.getPeptideRef(), [])
        tmp.append(i)
        trmap[tr.getPeptideRef()] = tmp

    swath_maps_dummy = []
    for key, value in trmap.items():
        try:
            transition_group = getTransitionGroup(exp, targeted, key, value,
                                                  chrom_map)
        except Exception:
            print("Skip", key, value)
            continue
        picker.pickTransitionGroup(transition_group)
        scorer.scorePeakgroups(transition_group, trafo, swath_maps_dummy,
                               output, False)
    return output
def detect_peaks_gcms_isotopewavelet(ms_experiment, parameters, debug=False):
    """
    Use the isotope wavelet to process raw data - can perform poorly on
    centroided data. Also see
    https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/a16105.html
    TODO use "corrected" intensity_type

    :param ms_experiment:
    :param parameters:
    :return:
    """
    print(f"Detecting peaks with {GCMSPeakDetectionMethod.ISOTOPEWAVELET}")
    ff = oms.FeatureFinder()
    if not debug:
        ff.setLogType(oms.LogType.NONE)
    else:
        ff.setLogType(oms.LogType.CMD)

    # Run the feature finder.
    name = "isotope_wavelet"
    pdm_name = GCMSPeakDetectionMethod.ISOTOPEWAVELET.name
    parameters['detection_mode'] = name
    parameters['pdm_name'] = pdm_name
    features = oms.FeatureMap()
    seeds = oms.FeatureMap()
    ff_params = ff.getParameters(name)

    # The algorithm complains when "the extremal length of the wavelet is
    # larger (47661) than the number of data points"; the wavelet length is
    # defined by mz_cutoff / min_spacing. hr_data must be true for
    # high-resolution data (Orbitrap, FT-ICR).
    is_hr_data = parameters.get("hr_data", False)
    if is_hr_data:
        hr_key = b"hr_data"
        # hr_data takes extremely long: >= 2 h per measurement of (!) 32 MB,
        # and there are far larger spectra...
        ff_params.setValue(hr_key, b"true")

    ff.run(name, ms_experiment, features, ff_params, seeds)
    features.setUniqueIds()

    fh = oms.FeatureXMLFile()
    feature_storage_path = f"{parameters['input_filename']}_output.featureXML"
    fh.store(feature_storage_path, features)
    parameters['feature_storage'] = feature_storage_path
    print("Found", features.size(), "features")
    return parameters
def run_featurefinder_centroided(input_map, params, seeds, out_path):
    ff = pms.FeatureFinder()
    ff.setLogType(pms.LogType.CMD)
    features = pms.FeatureMap()
    name = pms.FeatureFinderAlgorithmPicked.getProductName()
    ff.run(name, input_map, features, params, seeds)
    fh = pms.FeatureXMLFile()
    fh.store(out_path, features)
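# A minimal driver sketch (assumptions: pyopenms imported as pms, placeholder
# paths); the parameters are simply the defaults of the picked-peak algorithm.
def example_featurefinder(in_path='raw.mzML', out_path='out.featureXML'):
    input_map = pms.MSExperiment()
    pms.MzMLFile().load(in_path, input_map)
    input_map.updateRanges()  # the feature finder requires valid ranges
    name = pms.FeatureFinderAlgorithmPicked.getProductName()
    params = pms.FeatureFinder().getParameters(name)
    run_featurefinder_centroided(input_map, params, pms.FeatureMap(), out_path)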
def fit(self, filenames, max_peaks_per_file=1000):
    feature_map = oms.FeatureMap()
    n_files = len(filenames)
    for i, fn in enumerate(filenames):
        self.progress = 100 * (i + 1) / n_files
        feature_map += oms_ffmetabo_single_file(
            fn, max_peaks_per_file=max_peaks_per_file)
    self._feature_map = feature_map
def testFeatureFinder():
    """
    @tests:
    FeatureFinder.__init__
    FeatureFinder.endProgress
    FeatureFinder.getLogType
    FeatureFinder.getParameters
    FeatureFinder.run
    FeatureFinder.setLogType
    FeatureFinder.setProgress
    FeatureFinder.startProgress
    """
    ff = pyopenms.FeatureFinder()
    name = pyopenms.FeatureFinderAlgorithmPicked.getProductName()
    ff.run(name, pyopenms.MSExperiment(), pyopenms.FeatureMap(),
           pyopenms.Param(), pyopenms.FeatureMap())
    _testProgressLogger(ff)
    p = ff.getParameters(name)
    _testParam(p)
def collect_convex_hulls(self):
    self.convex_hulls = []

    # Open the featureXML.
    xml_file = oms.FeatureXMLFile()
    self.fmap = oms.FeatureMap()
    xml_file.load(self.feature_xml_fname, self.fmap)

    # Collect feature m/z values and sort by m/z.
    feature_mzs = []
    for i, fe in enumerate(self.fmap):
        feature_mzs.append([i, fe.getMZ()])
    feature_mzs = np.array(feature_mzs)
    feature_mzs = feature_mzs[feature_mzs[:, 1].argsort(), :]

    # Look up the example features in the featureXML.
    self.examples_oms_features = {}
    for ex, (mz_t, mz_m) in self.peaks_peaks.items():
        i_mz_fe = lookup.find(feature_mzs[:, 1], mz_m, t=10)
        if i_mz_fe:
            self.examples_oms_features[feature_mzs[i_mz_fe, 0]] = ex

    # Collect the convex hulls.
    for ife, fe in enumerate(self.fmap):
        if ife in self.examples_oms_features:
            hull_list = fe.getConvexHulls()
            self.extend_hulls(hull_list, ife, 0)
            subord_feature = fe.getSubordinates()
            if subord_feature:
                for subfe in subord_feature:
                    hull_list = subfe.getConvexHulls()
                    self.extend_hulls(hull_list, ife, 1)

    # Columns: rt, mz, feature index, hull index, is sub-feature.
    self.convex_hulls = np.vstack(self.convex_hulls)
    self.oms_feature_mzs = feature_mzs[feature_mzs[:, 0].argsort(), :]
def id_mapper(in_file, id_file, out_file, params, use_centroid_rt,
              use_centroid_mz, use_subelements):
    in_type = pms.FileHandler.getType(in_file)

    protein_ids = []
    peptide_ids = []
    pms.IdXMLFile().load(id_file, protein_ids, peptide_ids)

    mapper = pms.IDMapper()
    mapper.setParameters(params)

    if in_type == pms.Type.CONSENSUSXML:
        file_ = pms.ConsensusXMLFile()
        map_ = pms.ConsensusMap()
        file_.load(in_file, map_)
        mapper.annotate(map_, peptide_ids, protein_ids, use_subelements)
        addDataProcessing(
            map_, params,
            pms.DataProcessing.ProcessingAction.IDENTIFICATION_MAPPING)
        file_.store(out_file, map_)
    elif in_type == pms.Type.FEATUREXML:
        file_ = pms.FeatureXMLFile()
        map_ = pms.FeatureMap()
        file_.load(in_file, map_)
        mapper.annotate(map_, peptide_ids, protein_ids, use_centroid_rt,
                        use_centroid_mz)
        addDataProcessing(
            map_, params,
            pms.DataProcessing.ProcessingAction.IDENTIFICATION_MAPPING)
        file_.store(out_file, map_)
    elif in_type == pms.Type.MZQ:
        file_ = pms.MzQuantMLFile()
        msq = pms.MSQuantifications()
        file_.load(in_file, msq)
        maps = msq.getConsensusMaps()
        for map_ in maps:
            mapper.annotate(map_, peptide_ids, protein_ids, use_subelements)
            addDataProcessing(
                map_, params,
                pms.DataProcessing.ProcessingAction.IDENTIFICATION_MAPPING)
        msq.setConsensusMaps(maps)
        file_.store(out_file, msq)
    else:
        raise Exception("invalid input file format")
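# A usage sketch with placeholder paths (assumptions: pyopenms imported as pms,
# and IDMapper exposing getParameters() like the other param handlers here).
params = pms.IDMapper().getParameters()
id_mapper('features.featureXML', 'ids.idXML', 'annotated.featureXML', params,
          use_centroid_rt=True, use_centroid_mz=True, use_subelements=False)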
def mgf(self):
    """
    Opens an mzML file and runs the centroided feature finder on its
    MS2 spectra.
    """
    # Load the mzML, keeping MS2 spectra only.
    options = oms.PeakFileOptions()
    options.setMSLevels([2])
    self.mzml = oms.MzMLFile()
    self.mzml.setOptions(options)
    self.exp = oms.MSExperiment()
    self.mzml.load(self.fname, self.exp)

    # Run the feature finder.
    self.feature_finder = oms.FeatureFinder()
    self.ffname = 'centroided'
    self.features = oms.FeatureMap()
    self.seeds = oms.FeatureMap()
    self.params = oms.FeatureFinder().getParameters(self.ffname)
    self.feature_finder.run(
        self.ffname,
        self.exp,
        self.features,
        self.params,
        self.seeds,
    )
    self.features.setUniqueIds()
def match_features_internal(self, features: ms.FeatureMap) -> ms.FeatureMap:
    """Matches features in a single bin; intended to correct satellite features.

    The feature in each feature set with the largest convex hull becomes the
    'representative' feature of that set and the rest are discarded.

    Keyword arguments:
    features: the features of a single bin for intra-bin matching

    Returns: a matched set of features.
    """
    features.sortByRT()
    matched = ms.FeatureMap()

    for i in range(features.size()):
        feature1 = features[i]
        max_area = util.polygon_area(feature1.getConvexHull().getHullPoints())
        max_feature = feature1

        # Collect all features similar to feature1 within the RT threshold.
        similar = []
        first_idx = util.binary_search_left_rt(
            features, feature1.getRT() - self.RT_THRESHOLD)
        for j in range(first_idx, features.size()):
            if i == j:
                continue
            feature2 = features[j]
            if feature2.getRT() > feature1.getRT() + self.RT_THRESHOLD:
                break
            if util.similar_features(feature1, feature2, self.RT_THRESHOLD,
                                     self.MZ_THRESHOLD):
                similar.append(feature2)

        # Keep the similar feature with the largest convex hull.
        for feature2 in similar:
            area = util.polygon_area(feature2.getConvexHull().getHullPoints())
            if area > max_area:
                max_area = area
                max_feature = feature2

        if max_feature not in matched:
            matched.push_back(max_feature)
    return matched
def oms_ffmetabo_single_file(filename, max_peaks_per_file=5000):
    feature_map = oms.FeatureMap()
    mass_traces = []
    mass_traces_split = []
    mass_traces_filtered = []
    exp = oms.MSExperiment()
    peak_map = oms.PeakMap()

    options = oms.PeakFileOptions()
    options.setMSLevels([1])

    if filename.lower().endswith('.mzxml'):
        fh = oms.MzXMLFile()
    elif filename.lower().endswith('.mzml'):
        fh = oms.MzMLFile()
    else:
        assert False, filename
    fh.setOptions(options)

    # Build the peak map from the MS1 spectra.
    fh.load(filename, exp)
    for spec in exp.getSpectra():
        peak_map.addSpectrum(spec)

    # Mass trace detection, elution peak detection, then feature finding.
    mass_trace_detect = oms.MassTraceDetection()
    mass_trace_detect.run(peak_map, mass_traces, max_peaks_per_file)

    elution_peak_detection = oms.ElutionPeakDetection()
    elution_peak_detection.detectPeaks(mass_traces, mass_traces_split)

    feature_finding_metabo = oms.FeatureFindingMetabo()
    feature_finding_metabo.run(mass_traces_split, feature_map,
                               mass_traces_filtered)
    feature_map.sortByOverallQuality()
    return feature_map
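# Usage sketch ('sample.mzML' is a placeholder): the returned FeatureMap is
# sorted by overall quality, so the best feature comes first.
fm = oms_ffmetabo_single_file('sample.mzML', max_peaks_per_file=1000)
if fm.size() > 0:
    print(fm.size(), "features; best overall quality:",
          fm[0].getOverallQuality())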
def testFeatureXMLFile():
    """
    @tests:
    FeatureXMLFile.__init__
    FeatureXMLFile.load
    FeatureXMLFile.store
    FileHandler.__init__
    FileHandler.loadFeatures
    """
    fm = pyopenms.FeatureMap()
    fm.setUniqueIds()
    fh = pyopenms.FeatureXMLFile()
    fh.store("test.featureXML", fm)
    fh.load("test.featureXML", fm)

    fh = pyopenms.FileHandler()
    fh.loadFeatures("test.featureXML", fm)
def test_run_mrmrtnormalizer(self):
    # Load chromatograms.
    chromatograms = pyopenms.MSExperiment()
    fh = pyopenms.FileHandler()
    fh.loadExperiment(self.chromatograms, chromatograms)

    # Load the TraML file.
    targeted = pyopenms.TargetedExperiment()
    tramlfile = pyopenms.TraMLFile()
    tramlfile.load(self.tramlfile, targeted)

    # Create empty objects as input and finally as output.
    empty_swath = pyopenms.MSExperiment()
    trafo = pyopenms.TransformationDescription()
    output = pyopenms.FeatureMap()

    # Set up the feature finder and run it; disable the RT score.
    featurefinder = pyopenms.MRMFeatureFinderScoring()
    scoring_params = pyopenms.MRMFeatureFinderScoring().getDefaults()
    scoring_params.setValue("Scores:use_rt_score".encode(),
                            'false'.encode(), ''.encode())
    featurefinder.setParameters(scoring_params)
    featurefinder.pickExperiment(chromatograms, output, targeted, trafo,
                                 empty_swath)

    # Get the RT pairs and remove outliers.
    pairs = []
    simple_find_best_feature(output, pairs, targeted)
    pairs_corrected = pyopenms.MRMRTNormalizer().removeOutliersIterative(
        pairs, 0.95, 0.6, True, "iter_jackknife")
    pairs_corrected = [list(p) for p in pairs_corrected]

    expected = [(1497.56884765625, 1881.0), (2045.9776611328125, 2409.0),
                (2151.4814453125, 2509.0), (1924.0750732421875, 2291.0),
                (612.9832153320312, 990.0), (1086.2474365234375, 1470.0),
                (1133.89404296875, 1519.0), (799.5291137695312, 1188.0),
                (1397.1541748046875, 1765.0)]

    for exp, res in zip(expected, pairs_corrected):
        self.assertAlmostEqual(exp[0], res[0], eps)
        self.assertAlmostEqual(exp[1], res[1], eps)
def parse_featureXML_GT(feature_file):
    featuremap = pyopenms.FeatureMap()
    featurexml = pyopenms.FeatureXMLFile()
    featurexml.load(feature_file, featuremap)

    # One row per convex hull: its bounding box in RT and m/z.
    hulls = pd.DataFrame(columns=['rt_min', 'rt_max', 'mz_min', 'mz_max',
                                  'detected', 'pic_id'])
    for i in range(featuremap.size()):
        feature = featuremap[i]
        chs = feature.getConvexHulls()
        for j in range(len(chs)):
            pts = chs[j].getHullPoints()
            hulls.loc[len(hulls)] = [pts.min(0)[0], pts.max(0)[0],
                                     pts.min(0)[1], pts.max(0)[1],
                                     False, -1]
    return hulls
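# Usage sketch ('ground_truth.featureXML' is a placeholder): each row holds the
# RT/m/z bounding box of one convex hull; 'detected' and 'pic_id' start as
# False and -1 for later matching against detected features.
hulls = parse_featureXML_GT('ground_truth.featureXML')
print(hulls.head())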
def FeatureFindingMetabo(mzfile, noise_threshold_int, snr):
    finder = 'C:/Program Files/OpenMS/bin/FeatureFinderMetabo.exe'
    feature_file = 'tmp.featureXML'
    noise_threshold_int = noise_threshold_int / snr
    subprocess.call([finder, '-in', mzfile,
                     '-out', feature_file,
                     '-algorithm:common:noise_threshold_int', f'{noise_threshold_int}',
                     '-algorithm:common:chrom_peak_snr', f'{snr}',
                     '-algorithm:common:chrom_fwhm', '10',
                     '-algorithm:mtd:mass_error_ppm', '20',
                     '-algorithm:mtd:reestimate_mt_sd', 'true',
                     '-algorithm:mtd:min_sample_rate', '0',
                     '-algorithm:mtd:min_trace_length', '2',
                     '-algorithm:epd:width_filtering', 'off',
                     '-algorithm:ffm:charge_lower_bound', '1',
                     '-algorithm:ffm:charge_upper_bound', '5'])
    featuremap = pyopenms.FeatureMap()
    featurexml = pyopenms.FeatureXMLFile()
    featurexml.load(feature_file, featuremap)
    os.remove(feature_file)
    return featuremap
def FeatureFindingMetabo1(mzfile):
    exp = pyopenms.MSExperiment()
    pyopenms.MzMLFile().load(mzfile, exp)

    # Mass trace detection.
    mtd_params = pyopenms.MassTraceDetection().getDefaults()
    mtd = pyopenms.MassTraceDetection()
    mtd.setParameters(mtd_params)
    mass_traces = []
    mtd.run(exp, mass_traces)

    # Elution peak detection.
    epdet_params = pyopenms.ElutionPeakDetection().getDefaults()
    epdet = pyopenms.ElutionPeakDetection()
    epdet.setParameters(epdet_params)
    splitted_mass_traces = []
    epdet.detectPeaks(mass_traces, splitted_mass_traces)

    # Feature finding.
    ffm_params = pyopenms.FeatureFindingMetabo().getDefaults()
    ffm = pyopenms.FeatureFindingMetabo()
    ffm.setParameters(ffm_params)
    feature_map = pyopenms.FeatureMap()
    ffm.run(splitted_mass_traces, feature_map)
    return feature_map
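# A usage sketch ('input.mzML' is a placeholder path). This in-process pipeline
# mirrors the FeatureFinderMetabo.exe subprocess call above; to deviate from
# the defaults, edit the Param objects before setParameters(), e.g.
# mtd_params.setValue('mass_error_ppm', 20.0) (the same parameter the CLI sets
# via -algorithm:mtd:mass_error_ppm; value here is only illustrative).
fm = FeatureFindingMetabo1('input.mzML')
print(fm.size(), "features found")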
def run_featurefinder_centroided(input_path, params, seeds, out_path):
    # Load the MS1 spectra.
    fh = pms.MzMLFile()
    options = pms.PeakFileOptions()
    options.setMSLevels([1, 1])
    fh.setOptions(options)
    input_map = pms.MSExperiment()
    fh.load(input_path, input_map)
    input_map.updateRanges()

    # Run the feature finder.
    ff = pms.FeatureFinder()
    ff.setLogType(pms.LogType.CMD)
    features = pms.FeatureMap()
    name = pms.FeatureFinderAlgorithmPicked.getProductName()
    ff.run(name, input_map, features, params, seeds)
    features.setUniqueIds()
    addDataProcessing(features, params, pms.ProcessingAction.QUANTITATION)

    # Store the result.
    fh = pms.FeatureXMLFile()
    fh.store(out_path, features)
def extract_data(in_file_name, mz_search):
    xml_file = oms.FeatureXMLFile()
    fmap = oms.FeatureMap()
    xml_file.load(in_file_name, fmap)

    # Match m/z within a tolerance instead of requiring exact equality.
    delta = 0.01
    for n in fmap:
        if abs(mz_search - n.getMZ()) < delta:
            # Print the hull points of the feature itself.
            hull_list = n.getConvexHulls()  # list of ConvexHull2D
            for hull in hull_list:
                hull_points = hull.getHullPoints()  # numpy.ndarray
                for p in hull_points:
                    print(p[0], p[1])
            # Print the hull points of its subordinate features, if any.
            subord_feature = n.getSubordinates()
            if subord_feature:
                for f in subord_feature:
                    hull_list = f.getConvexHulls()
                    for hull in hull_list:
                        hull_points = hull.getHullPoints()
                        for p in hull_points:
                            print(p[0], p[1])
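# Usage sketch ('features.featureXML' and the m/z value are placeholders):
# prints the (RT, m/z) hull points of every feature within 0.01 of m/z 445.12.
extract_data('features.featureXML', 445.12)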
def main(options):
    # Load the featureXML.
    features = pyopenms.FeatureMap()
    fh = pyopenms.FileHandler()
    fh.loadFeatures(options.infile, features)

    # Load the TraML file.
    targeted = pyopenms.TargetedExperiment()
    tramlfile = pyopenms.TraMLFile()
    tramlfile.load(options.traml_in, targeted)

    # Write the TSV file.
    filename = options.infile.split(".")[0]
    with open(options.outfile, "w") as tsvfile:
        wr = csv.writer(tsvfile, delimiter='\t')
        header = get_header(features)
        wr.writerow(header)
        for f in features:
            keys = []
            f.getKeys(keys)
            row = convert_to_row(f, targeted, filename, keys, filename)
            wr.writerow(row)
import pyopenms

"""
Producing the test data for TOPP_FeatureLinkerUnlabeledQT_5
and TOPP_FeatureLinkerUnlabeledQT_6
"""

fmaps = [pyopenms.FeatureMap() for i in range(3)]
pepids = []
pepseq = ["PEPTIDEA", "PEPTIDEK", "PEPTIDER"]

for s in pepseq:
    pepid = pyopenms.PeptideIdentification()
    hit = pyopenms.PeptideHit()
    hit.setSequence(pyopenms.AASequence.fromString(s, True))
    pepid.insertHit(hit)
    pepid.setIdentifier("Protein0")
    pepids.append(pepid)

protid = pyopenms.ProteinIdentification()
protid.setIdentifier("Protein0")

for i, fmap in enumerate(fmaps):
    fmap.setProteinIdentifications([protid])
    # Add 3 features to each map, but with a twist
    # (adding different peptide ids to different maps).
    for k in range(3):
        f = pyopenms.Feature()
        f.setRT(300 + k * 100 + i * 10)
        f.setMZ(500 + k * 0.001 + i * 0.01)
        f.setIntensity(500 + i * 100)
        f.setMetaValue("sequence", pepseq[(i + k) % 3])  # easier viewing in TOPPView
        f.setPeptideIdentifications([pepids[(i + k) % 3]])
        fmap.push_back(f)
    pyopenms.FeatureXMLFile().store("output_%s.featureXML" % i, fmap)