Example #1
    def testFunction(self):
        # IsobaricIsotopeCorrector: correct isotopic impurities between the
        # reporter channels of each isobaric quantitation method
        # (self.eightplex / self.fourplex / self.tmt come from the test
        # fixture's setUp).
        for method in [self.eightplex, self.fourplex, self.tmt]:
            inst = pyopenms.IsobaricIsotopeCorrector(method)
            map1 = pyopenms.ConsensusMap()
            map2 = pyopenms.ConsensusMap()
            assert inst.correctIsotopicImpurities(map1, map2)

    def testFunction(self):
        # IsobaricNormalizer: normalize the reporter ion intensities of a
        # consensus map in place.
        for method in [self.eightplex, self.fourplex, self.tmt]:
            inst = pyopenms.IsobaricNormalizer(method)
            map1 = pyopenms.ConsensusMap()
            assert inst.normalize is not None
            inst.normalize(map1)

    def testFunction(self):
        # IsobaricChannelExtractor: extract the reporter ion channels of an
        # MSExperiment into a consensus map.
        for method in [self.eightplex, self.fourplex, self.tmt]:
            inst = pyopenms.IsobaricChannelExtractor(method)
            map1 = pyopenms.ConsensusMap()
            exp = pyopenms.MSExperiment()
            assert inst.extractChannels is not None
Example #4
def id_mapper(in_file, id_file, out_file, params, use_centroid_rt,
              use_centroid_mz, use_subelements):
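    """Annotate the features of a consensusXML, featureXML or mzq input file
    with the peptide/protein identifications from an idXML file and write the
    annotated result to out_file (assumes `import pyopenms as pms` in the
    enclosing script)."""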

    in_type = pms.FileHandler.getType(in_file)

    protein_ids = []
    peptide_ids = []

    pms.IdXMLFile().load(id_file, protein_ids, peptide_ids)

    mapper = pms.IDMapper()
    mapper.setParameters(params)

    if in_type == pms.Type.CONSENSUSXML:
        file_ = pms.ConsensusXMLFile()
        map_ = pms.ConsensusMap()
        file_.load(in_file, map_)
        mapper.annotate(map_, peptide_ids, protein_ids, use_subelements)
        addDataProcessing(
            map_, params,
            pms.DataProcessing.ProcessingAction.IDENTIFICATION_MAPPING)
        file_.store(out_file, map_)

    elif in_type == pms.Type.FEATUREXML:
        file_ = pms.FeatureXMLFile()
        map_ = pms.FeatureMap()
        file_.load(in_file, map_)
        mapper.annotate(map_, peptide_ids, protein_ids, use_centroid_rt,
                        use_centroid_mz)
        addDataProcessing(
            map_, params,
            pms.DataProcessing.ProcessingAction.IDENTIFICATION_MAPPING)
        file_.store(out_file, map_)

    elif in_type == pms.Type.MZQ:
        file_ = pms.MzQuantMLFile()
        msq = pms.MSQuantifications()
        file_.load(in_file, msq)
        maps = msq.getConsensusMaps()
        for map_ in maps:
            mapper.annotate(map_, peptide_ids, protein_ids, use_subelements)
            addDataProcessing(
                map_, params,
                pms.DataProcessing.ProcessingAction.IDENTIFICATION_MAPPING)
        msq.setConsensusMaps(maps)
        file_.store(out_file, msq)

    else:
        raise Exception("invalid input file format")
def link(in_files, out_file, keep_subelements, params):
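    """Group features across the given featureXML (or consensusXML) input
    files with FeatureGroupingAlgorithmQT and store the linked result as a
    consensusXML file (assumes `import pyopenms as pms` and
    `from collections import Counter` in the enclosing script)."""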

    in_types = set(pms.FileHandler.getType(in_) for in_ in in_files)

    if in_types == set((pms.Type.CONSENSUSXML, )):
        link_features = False
    elif in_types == set((pms.Type.FEATUREXML, )):
        link_features = True
    else:
        raise Exception("different kinds of input files")

    algorithm_parameters = params.copy("algorithm:", True)
    algorithm = pms.FeatureGroupingAlgorithmQT()
    algorithm.setParameters(algorithm_parameters)

    out_map = pms.ConsensusMap()
    fds = out_map.getColumnHeaders()
    if link_features:
        f = pms.FeatureXMLFile()
        maps = []
        for i, in_file in enumerate(in_files):
            map_ = pms.FeatureMap()
            f.load(in_file, map_)

            # set the column header (file description) for this input file
            fd = fds.get(i, pms.ColumnHeader())
            fd.filename = in_file
            fd.size = map_.size()
            fd.unique_id = map_.getUniqueId()
            fds[i] = fd
            maps.append(map_)
        out_map.setColumnHeaders(fds)
        algorithm.group(maps, out_map)
    else:
        f = pms.ConsensusXMLFile()
        maps = []
        for i, in_file in enumerate(in_files):
            map_ = pms.ConsensusMap()
            f.load(in_file, map_)
            maps.append(map_)
        algorithm.group(maps, out_map)

        if not keep_subelements:
            for i in range(len(in_files)):
                # set the column header (file description) for this input file
                fd = fds.get(i, pms.ColumnHeader())
                fd.filename = in_files[i]
                fd.size = maps[i].size()
                fd.unique_id = maps[i].getUniqueId()
                fds[i] = fd
            out_map.setColumnHeaders(fds)
        else:
            algorithm.transferSubelements(maps, out_map)

    out_map.setUniqueIds()
    addDataProcessing(out_map, params,
                      pms.DataProcessing.ProcessingAction.FEATURE_GROUPING)

    pms.ConsensusXMLFile().store(out_file, out_map)

    sizes = []
    for feat in out_map:
        sizes.append(feat.size())

    c = Counter(sizes)
    print "Number of consensus features:"
    for size, count in c.most_common():
        print "   of size %2d : %6d" % (size, count)
    print "        total : %6d" % out_map.size()
Example #6
def main(options):
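    """Quantify SILAC pairs in options.infile with SILACAnalyzer and write a
    consensusXML result to options.outfile (assumes `import copy` and
    `import pyopenms` at module level)."""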

    # make sure that the ids are "correct" for the testcase
    date_time = pyopenms.DateTime()
    if options.test:
        date_time.set("1999-12-31 23:59:59")
        pyopenms.UniqueIdGenerator().setSeed(date_time)
    else:
        date_time = pyopenms.DateTime.now()

    exp = pyopenms.MSExperiment()
    out_map = pyopenms.ConsensusMap()
    pyopenms.FileHandler().loadExperiment(options.infile, exp)
    exp.updateRanges()

    #
    # 1. filter MS1 level (only keep MS1)
    #
    tmp = copy.copy(exp)
    tmp.clear(False)
    for spectrum in exp:
        if spectrum.getMSLevel() == 1:
            tmp.push_back(spectrum)
    exp = tmp
    exp.sortSpectra(True)

    #
    # 2. set parameters
    #
    analyzer = pyopenms.SILACAnalyzer()
    analyzer.initialize(
        # section sample
        options.selected_labels,
        options.charge_min,
        options.charge_max,
        options.missed_cleavages,
        options.isotopes_per_peptide_min,
        options.isotopes_per_peptide_max,
        # section "algorithm"
        options.rt_threshold,
        options.rt_min,
        options.intensity_cutoff,
        options.intensity_correlation,
        options.model_deviation,
        options.allow_missing_peaks,
        # labels
        options.label_identifiers)

    #
    # 3. run
    #
    analyzer.run_all(exp, out_map)

    #
    # 4. set dataprocessing and output meta information
    #
    out_map.sortByPosition()

    dp = out_map.getDataProcessing()
    p = pyopenms.DataProcessing()
    p.setProcessingActions(
        set([
            pyopenms.ProcessingAction().DATA_PROCESSING,
            pyopenms.ProcessingAction().PEAK_PICKING,
            pyopenms.ProcessingAction().FILTERING,
            pyopenms.ProcessingAction().QUANTITATION
        ]))
    p.setCompletionTime(date_time)

    sw = p.getSoftware()
    sw.setName("SILACAnalyzer")
    if options.test:
        sw.setVersion("version_string")
        p.setSoftware(sw)
        p.setMetaValue("parameter: mode", "test_mode")
    else:
        sw.setVersion("pyTOPP v1.10")
        p.setSoftware(sw)
    dp.append(p)
    out_map.setDataProcessing(dp)

    #
    # 5. write output
    #
    analyzer.writeConsensus(pyopenms.String(options.outfile), out_map)
Example #7
    def test_run_SILACAnalyzer(self):
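        # End-to-end run of SILACAnalyzer on the fixture file self.infile,
        # checking size, quality, RT and m/z of the resulting consensus map.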

        exp = pyopenms.MSExperiment()
        out_map = pyopenms.ConsensusMap()
        pyopenms.FileHandler().loadExperiment(self.infile, exp)
        exp.updateRanges()

        #
        # 1. filter MS1 level (only keep MS1)
        #
        tmp = copy.copy(exp)
        tmp.clear(False)  # keep meta data, drop all spectra
        for spectrum in exp:
            if spectrum.getMSLevel() == 1:
                tmp.addSpectrum(spectrum)
        exp = tmp
        exp.sortSpectra(True)

        selected_labels = "Lys8"
        charge_min = 2
        charge_max = 2
        missed_cleavages = 0
        isotopes_per_peptide_min = 3
        isotopes_per_peptide_max = 3

        rt_threshold = 80
        rt_min = 0
        intensity_cutoff = 10
        intensity_correlation = 0.95
        model_deviation = 10
        allow_missing_peaks = False
        label_identifiers = {"Lys8": 8.0141988132}

        #
        # 2. set parameters
        #
        analyzer = pyopenms.SILACAnalyzer()
        analyzer.initialize(
            # section sample
            selected_labels,
            charge_min,
            charge_max,
            missed_cleavages,
            isotopes_per_peptide_min,
            isotopes_per_peptide_max,
            # section "algorithm"
            rt_threshold,
            rt_min,
            intensity_cutoff,
            intensity_correlation,
            model_deviation,
            allow_missing_peaks,
            # labels
            label_identifiers)

        #
        # 3. run
        #
        analyzer.run_all(exp, out_map)

        out_map.sortByPosition()

        self.assertEqual(out_map.size(), 3)
        self.assertEqual(out_map[0].getQuality(), 8.0)
        self.assertEqual(out_map[1].getQuality(), 8.0)
        self.assertEqual(out_map[2].getQuality(), 8.0)

        self.assertAlmostEqual(out_map[0].getRT(), 6632.409179688, eps)
        self.assertAlmostEqual(out_map[1].getRT(), 6635.169433594, eps)
        self.assertAlmostEqual(out_map[2].getRT(), 6657.56445312, eps)

        self.assertAlmostEqual(out_map[0].getMZ(), 668.321350097656, eps)
        self.assertAlmostEqual(out_map[1].getMZ(), 670.894470214844, eps)
        self.assertAlmostEqual(out_map[2].getMZ(), 668.8262329102, eps)