Code example #1
def main(options):
    import time

    # Read the files
    start = time.time()
    reader = SWATHScoringReader.newReader(options.infiles, options.file_format,
                                          options.readmethod)
    runs = reader.parse_files(True)
    # Create experiment
    this_exp = MRExperiment()
    this_exp.set_runs(runs)
    print("Reading the input files took %ss" % (time.time() - start))

    # Fix input filenames
    fix_input_fnames(options, runs)

    # Map the precursors across multiple runs, determine the number of
    # precursors in all runs without alignment.
    start = time.time()
    multipeptides = this_exp.get_all_multipeptides(1.0, verbose=True)
    print("Mapping the precursors took %ss" % (time.time() - start))

    for m in multipeptides:

        # Error handling if somehow more than one peakgroup was selected ...
        for p in m.getAllPeptides():
            p._fixSelectedPGError(fixMethod="BestScore")

        if len(m.get_selected_peakgroups()) > 0:
            continue

        for p in m.get_peptides():
            if len(list(p.get_all_peakgroups())) != 1:
                print(p)
                print(dir(p))
                print(p.get_run_id())
                for pg in p.get_all_peakgroups():
                    print(pg.print_out())
                print(len(list(p.get_all_peakgroups())))

            assert len(list(p.get_all_peakgroups())) == 1
            for pg in p.get_all_peakgroups():
                pg.select_this_peakgroup()

    start = time.time()
    if len(options.matrix_outfile) > 0:
        write_out_matrix_file(
            options.matrix_outfile,
            this_exp.runs,
            multipeptides,
            options.min_frac_selected,
            style=options.output_method,
            write_requant=not options.remove_requant_values,
            aligner_mscore_treshold=options.aligner_mscore_threshold)
    print("Writing output took %ss" % (time.time() - start))
Code example #2
    def test_parse_files(self):

        filename = os.path.join(self.datadir_gui, "dataset3.csv")
        filename_mzml = os.path.join(self.datadir_gui, "dataset3.mzML")
        r = reader.SWATHScoringReader.newReader([filename], "openswath", readmethod="gui", errorHandling="loose")

        new_exp = Experiment()
        new_exp.runs = r.parse_files(True)
        multipeptides = new_exp.get_all_multipeptides(1.0, verbose=False)

        # Build map of the PeptideName/Charge to the individual multipeptide
        peakgroup_map = {}
        mapper.buildPeakgroupMap(multipeptides, peakgroup_map)

        self.assertEqual(len(peakgroup_map.keys()), 2)
        self.assertEqual(sorted(list(peakgroup_map.keys())), ['testpeptide/0', 'testpeptide/0_pr'])
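The mapper.buildPeakgroupMap call above presumably fills peakgroup_map with "FullPeptideName/Charge" keys, matching the inline loop in the MSData.py example that follows. A hedged sketch of such a mapping (a hypothetical stand-in, not the library's implementation):

def build_peakgroup_map(multipeptides, peakgroup_map):
    # Key each multipeptide by the FullPeptideName/Charge of its best peakgroup,
    # mirroring the inline loop in _read_peakgroup_files below.
    for m in multipeptides:
        pg = m.find_best_peptide_pg()
        identifier = pg.get_value("FullPeptideName") + "/" + pg.get_value("Charge")
        peakgroup_map[identifier] = m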
Code example #3
File: MSData.py  Project: YetsunLam/msproteomicstools
    def _read_peakgroup_files(self, aligned_pg_files, swathfiles):
        """
        The peakgroup files have to have the following columns:
            - FullPeptideName
            - Charge
            - leftWidth
            - rightWidth
            - m_score
            - Intensity
            - align_runid
            - transition_group_id
        """

        # Read in the peakgroup files, parse them and map across runs
        reader = SWATHScoringReader.newReader(aligned_pg_files, "openswath", readmethod="gui", errorHandling="loose")
        new_exp = Experiment()
        new_exp.runs = reader.parse_files(REALIGN_RUNS)
        multipeptides = new_exp.get_all_multipeptides(FDR_CUTOFF, verbose=False)

        # Build map of the PeptideName/Charge to the individual multipeptide
        peakgroup_map = {}
        for m in multipeptides:
            pg = m.find_best_peptide_pg()
            identifier = pg.get_value("FullPeptideName") + "/" + pg.get_value("Charge")
            peakgroup_map[ identifier ] = m

        for swathrun in swathfiles.getSwathFiles():
            if ONLY_SHOW_QUANTIFIED:
                intersection = set(swathrun.get_all_precursor_ids()).intersection( peakgroup_map.keys() )
                todelete = set(swathrun.get_all_precursor_ids()).difference(intersection)
                if len(intersection) == 0:
                    print "Could not find any intersection between identifiers in your transition file and the provided chromatograms"
                    print len(intersection)
                swathrun.remove_precursors(todelete)

            # for each precursor in this run, identify the best peakgroup and store the value
            for precursor_id in swathrun.get_all_precursor_ids():
                if precursor_id not in peakgroup_map:
                    continue

                m = peakgroup_map[ precursor_id ]
                if m.hasPrecursorGroup(swathrun.runid):
                    for pg in m.getPrecursorGroup(swathrun.runid).getAllPeakgroups():
                        l,r       = [ float(pg.get_value("leftWidth")), float(pg.get_value("rightWidth")) ]
                        fdrscore  = float(pg.get_value("m_score"))
                        intensity = float(pg.get_value("Intensity"))
                        swathrun.add_peakgroup_data(precursor_id,l,r, fdrscore, intensity)
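Given the column requirements listed in the docstring above, a quick sanity check on an input peakgroup file could look like the following sketch. The tab delimiter is an assumption; pass delimiter="," for genuinely comma-separated files.

import csv

REQUIRED_COLUMNS = {
    "FullPeptideName", "Charge", "leftWidth", "rightWidth",
    "m_score", "Intensity", "align_runid", "transition_group_id",
}

def check_required_columns(path, delimiter="\t"):
    # Read only the header row and report any missing required columns.
    with open(path) as fh:
        header = next(csv.reader(fh, delimiter=delimiter))
    missing = REQUIRED_COLUMNS.difference(header)
    if missing:
        raise ValueError("missing columns: %s" % ", ".join(sorted(missing)))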
Code example #4
class TestAlignment(unittest.TestCase):

    def setUp(self):
        # Set up dirs
        self.dirname = os.path.dirname(os.path.abspath(__file__))
        self.topdir = os.path.join(os.path.join(self.dirname, ".."), "..")
        self.datadir = os.path.join(os.path.join(self.topdir, "test"), "data")
        self.scriptdir = os.path.join(self.topdir, "analysis")

        # Set up files
        peakgroups_file = os.path.join(self.datadir, "imputeValues/imputeValues_5_input.csv")
        mzml_file = os.path.join(self.datadir, "imputeValues/r004_small/split_olgas_otherfile.chrom.mzML")

        # Parameters
        self.initial_alignment_cutoff = 0.0001
        fdr_cutoff_all_pg = 1.0
        max_rt_diff = 30

        # Read input
        reader = SWATHScoringReader.newReader([peakgroups_file], "openswath", readmethod="complete")
        self.new_exp = MRExperiment()
        self.new_exp.runs = reader.parse_files()
        self.multipeptides = self.new_exp.get_all_multipeptides(fdr_cutoff_all_pg, verbose=False)

        # Align all against all
        self.tr_data = transformations.LightTransformationData()
        spl_aligner = SplineAligner(self.initial_alignment_cutoff)
        for run_0 in self.new_exp.runs:
            for run_1 in self.new_exp.runs:
                helper.addDataToTrafo(self.tr_data, run_0, run_1, spl_aligner, self.multipeptides, "linear", 30)

        # Select two interesting peptides
        pepname = "21517_C[160]NVVISGGTGSGK/2_run0 0 0"
        self.current_mpep1 = [m for m in self.multipeptides if m.getAllPeptides()[0].get_id() == pepname][0]

        pepname = "26471_GYEDPPAALFR/2_run0 0 0"
        self.current_mpep2 = [m for m in self.multipeptides if m.getAllPeptides()[0].get_id() == pepname][0]

    def test_shortestDistance_1(self):

        rid = "0_0"
        spl_aligner = SplineAligner(self.initial_alignment_cutoff)
        dist_matrix = getDistanceMatrix(self.new_exp, self.multipeptides, spl_aligner)

        # Select peakgroups, compute left/right border
        selected_pg = [pg for p in self.current_mpep1.getAllPeptides() for pg in p.get_all_peakgroups() if pg.get_cluster_id() == 1]
        rmap = dict([(r.get_id(),i) for i,r in enumerate(self.new_exp.runs) ])
        border_l, border_r = integrationBorderShortestDistance(selected_pg, 
            rid, self.tr_data, dist_matrix, rmap)

        # Direct transformation from 0_2 to 0_0
        self.assertAlmostEqual(border_l, self.tr_data.getTrafo("0_2", "0_0").predict([ 240.0 ])[0])
        self.assertAlmostEqual(border_r, self.tr_data.getTrafo("0_2", "0_0").predict([ 260.0 ])[0])

        self.assertAlmostEqual(border_l, 77.992277992277934)
        self.assertAlmostEqual(border_r, 84.1698841699)

    def test_shortestPath_1(self):

        rid = "0_0"
        spl_aligner = SplineAligner(self.initial_alignment_cutoff)
        tree = MinimumSpanningTree(getDistanceMatrix(self.new_exp, self.multipeptides, spl_aligner))
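        # the minimum spanning tree comes back as (index, index) edges over new_exp.runs; map them to run ids below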
        tree_mapped = [(self.new_exp.runs[a].get_id(), self.new_exp.runs[b].get_id()) for a,b in tree]

        # Select peakgroups, compute left/right border
        selected_pg = [pg for p in self.current_mpep1.getAllPeptides() for pg in p.get_all_peakgroups() if pg.get_cluster_id() == 1]
        border_l, border_r = integrationBorderShortestPath(selected_pg, 
            rid, self.tr_data, tree_mapped)

        # Direct transformation from 0_2 to 0_0
        self.assertAlmostEqual(border_l, self.tr_data.getTrafo("0_2", "0_0").predict([ 240.0 ])[0])
        self.assertAlmostEqual(border_r, self.tr_data.getTrafo("0_2", "0_0").predict([ 260.0 ])[0])

        self.assertAlmostEqual(border_l, 77.992277992277934)
        self.assertAlmostEqual(border_r, 84.1698841699)

    def test_reference_1(self):

        rid = "0_0"
        self.tr_data.reference = "0_2" # set reference run to 0_2

        spl_aligner = SplineAligner(self.initial_alignment_cutoff)
        tree = MinimumSpanningTree(getDistanceMatrix(self.new_exp, self.multipeptides, spl_aligner))
        tree_mapped = [(self.new_exp.runs[a].get_id(), self.new_exp.runs[b].get_id()) for a,b in tree]

        # Select peakgroups, compute left/right border
        selected_pg = [pg for p in self.current_mpep1.getAllPeptides() for pg in p.get_all_peakgroups() if pg.get_cluster_id() == 1]
        border_l, border_r = integrationBorderReference(self.new_exp, selected_pg, 
            rid, self.tr_data, "median")

        # Direct transformation from 0_2 to 0_0
        self.assertAlmostEqual(border_l, self.tr_data.getTrafo("0_2", "0_0").predict([ 240.0 ])[0])
        self.assertAlmostEqual(border_r, self.tr_data.getTrafo("0_2", "0_0").predict([ 260.0 ])[0])

        self.assertAlmostEqual(border_l, 77.992277992277934)
        self.assertAlmostEqual(border_r, 84.1698841699)

        border_l, border_r = integrationBorderReference(self.new_exp, selected_pg, 
            rid, self.tr_data, "mean")
        self.assertAlmostEqual(border_l, 77.992277992277934)
        self.assertAlmostEqual(border_r, 84.1698841699)
        border_l, border_r = integrationBorderReference(self.new_exp, selected_pg, 
            rid, self.tr_data, "max_width")
        self.assertAlmostEqual(border_l, 77.992277992277934)
        self.assertAlmostEqual(border_r, 84.1698841699)

        self.assertRaises(Exception, integrationBorderReference, self.new_exp, selected_pg, 
            rid, self.tr_data, "dummy")

    def test_shortestDistance_2(self):

        rid = "0_1"
        spl_aligner = SplineAligner(self.initial_alignment_cutoff)
        dist_matrix = getDistanceMatrix(self.new_exp, self.multipeptides, spl_aligner)

        # Select peakgroups, compute left/right border
        selected_pg = [pg for p in self.current_mpep1.getAllPeptides() for pg in p.get_all_peakgroups() if pg.get_cluster_id() == 1]
        rmap = dict([(r.get_id(),i) for i,r in enumerate(self.new_exp.runs) ])
        border_l, border_r = integrationBorderShortestDistance(selected_pg, 
            rid, self.tr_data, dist_matrix, rmap)

        # Shortest distance means that we transformed directly from 0_2 to 0_1
        self.assertAlmostEqual(border_l, self.tr_data.getTrafo("0_2", "0_1").predict([ 240.0 ])[0])
        self.assertAlmostEqual(border_r, self.tr_data.getTrafo("0_2", "0_1").predict([ 260.0 ])[0])

        self.assertAlmostEqual(border_l, 168.03088803088787)
        self.assertAlmostEqual(border_r, 183.32046332)

    def test_shortestPath_2(self):

        rid = "0_1"
        spl_aligner = SplineAligner(self.initial_alignment_cutoff)
        tree = MinimumSpanningTree(getDistanceMatrix(self.new_exp, self.multipeptides, spl_aligner))
        tree_mapped = [(self.new_exp.runs[a].get_id(), self.new_exp.runs[b].get_id()) for a,b in tree]

        # Select peakgroups, compute left/right border
        selected_pg = [pg for p in self.current_mpep1.getAllPeptides() for pg in p.get_all_peakgroups() if pg.get_cluster_id() == 1]
        border_l, border_r = integrationBorderShortestPath(selected_pg, 
            rid, self.tr_data, tree_mapped)

        # Shortest path means that we transformed from 0_2 to 0_1
        self.assertAlmostEqual(border_l, self.tr_data.getTrafo("0_2", "0_1").predict( [ 240.0 ] ))
        self.assertAlmostEqual(border_r, self.tr_data.getTrafo("0_2", "0_1").predict( [ 260.0 ] ))

        self.assertAlmostEqual(border_l, 168.03088803088787)
        self.assertAlmostEqual(border_r, 183.32046332046318)

    def test_shortestPath_3(self):

        rid = "0_1"
        spl_aligner = SplineAligner(self.initial_alignment_cutoff)
        tree = MinimumSpanningTree(getDistanceMatrix(self.new_exp, self.multipeptides, spl_aligner))
        tree_mapped = [(self.new_exp.runs[a].get_id(), self.new_exp.runs[b].get_id()) for a,b in tree]

        # Select peakgroups, compute left/right border
        selected_pg = [pg for p in self.current_mpep2.getAllPeptides() for pg in p.get_all_peakgroups() if pg.get_cluster_id() == 1]
        border_l, border_r = integrationBorderShortestPath(selected_pg, 
            rid, self.tr_data, tree_mapped)

        # Shortest path means that we transformed from 0_0 to 0_2 and then to 0_1
        self.assertAlmostEqual(border_l, 
                               self.tr_data.getTrafo("0_2", "0_1").predict(
                                 self.tr_data.getTrafo("0_0", "0_2").predict([ 600 ]) 
                               ))
        self.assertAlmostEqual(border_r, 
                               self.tr_data.getTrafo("0_2", "0_1").predict(
                                 self.tr_data.getTrafo("0_0", "0_2").predict([ 700 ]) 
                               ))

        self.assertAlmostEqual(border_l, 1452.355212355212)
        self.assertAlmostEqual(border_r, 1696.9884169884167)

    def test_reference_2(self):

        rid = "0_1"
        self.tr_data.reference = "0_0" # set reference run to 0_0

        spl_aligner = SplineAligner(self.initial_alignment_cutoff)
        tree = MinimumSpanningTree(getDistanceMatrix(self.new_exp, self.multipeptides, spl_aligner))
        tree_mapped = [(self.new_exp.runs[a].get_id(), self.new_exp.runs[b].get_id()) for a,b in tree]

        # Select peakgroups, compute left/right border
        selected_pg = [pg for p in self.current_mpep1.getAllPeptides() for pg in p.get_all_peakgroups() if pg.get_cluster_id() == 1]
        border_l, border_r = integrationBorderReference(self.new_exp, selected_pg, 
            rid, self.tr_data, "median")

        # Reference 0_0 means that we transformed from 0_2 to 0_0 and then to 0_1
        self.assertAlmostEqual(border_l, 
                               self.tr_data.getTrafo("0_0", "0_1").predict(
                                 self.tr_data.getTrafo("0_2", "0_0").predict([ 240.0 ]) 
                               ))
        self.assertAlmostEqual(border_r, 
                               self.tr_data.getTrafo("0_0", "0_1").predict(
                                 self.tr_data.getTrafo("0_2", "0_0").predict([ 260.0 ]) 
                               ))

        self.assertAlmostEqual(border_l, 187.18146718146681)
        self.assertAlmostEqual(border_r, 202.00772200772167)
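The assertions in test_shortestPath_3 and test_reference_2 make the underlying idea explicit: when two runs are not directly connected (or a fixed reference run is set), retention times are transformed by chaining getTrafo().predict() across the intermediate runs. A hedged sketch of that chaining over an explicit path of run ids (a hypothetical helper, not the project's implementation):

def chain_predict(tr_data, path, rt):
    # Transform a retention time along a path of run ids, e.g. ["0_0", "0_2", "0_1"],
    # by applying each pairwise transformation in turn, mirroring the nested
    # predict() calls asserted in the tests above.
    values = [rt]
    for source, target in zip(path, path[1:]):
        values = tr_data.getTrafo(source, target).predict(values)
    return values[0]

For example, chain_predict(self.tr_data, ["0_0", "0_2", "0_1"], 600) would reproduce the left-border value asserted in test_shortestPath_3.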
Code example #5
class MatrixOutputWriters(unittest.TestCase):

    def setUp(self):

        # Set up dirs
        self.dirname = os.path.dirname(os.path.abspath(__file__))
        self.topdir = os.path.join(os.path.join(self.dirname, ".."), "..")
        self.datadir = os.path.join(os.path.join(self.topdir, "test"), "data")
        self.scriptdir = os.path.join(self.topdir, "analysis")

        # Set up files
        peakgroups_file = os.path.join(self.datadir, "imputeValues/imputeValues_5_input.csv")
        fdr_cutoff_all_pg = 1.0

        # Read input
        reader = SWATHScoringReader.newReader([peakgroups_file], "openswath", readmethod="complete")
        self.exp = MRExperiment()
        self.exp.runs = reader.parse_files()
        self.multipeptides = self.exp.get_all_multipeptides(fdr_cutoff_all_pg, verbose=False)

        # Set up files for the second dataset
        peakgroups_file = os.path.join(self.datadir, "feature_alignment_7_openswath_input.csv")
        reader = SWATHScoringReader.newReader([peakgroups_file], "openswath", readmethod="complete")
        self.exp2 = MRExperiment()
        self.exp2.runs = reader.parse_files()
        self.multipeptides2 = self.exp2.get_all_multipeptides(fdr_cutoff_all_pg, verbose=False)

        # Select the best peakgroup per peptide and select it for writing out
        fdr_cutoff = 0.01
        for mpep in self.multipeptides2:
            for prgr in mpep.getAllPeptides():
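                # min() over (fdr_score, peakgroup) tuples picks the peakgroup with the lowest FDR score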
                minpg = min( [(pg.get_fdr_score(), pg) for pg in prgr.peakgroups] )
                if minpg[0] < fdr_cutoff:
                    minpg[1].select_this_peakgroup()

    def test_matrix_out_1(self):
        """Test the output matrix writers"""

        import msproteomicstoolslib.algorithms.alignment.AlignmentHelper as helper
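        # The same call is repeated below for .csv, .tsv, .xls and .xlsx;
        # write_out_matrix_file presumably infers the output format from the file extension.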

        tmpfile = "tmp.output.csv"
        helper.write_out_matrix_file(tmpfile, self.exp.runs, self.multipeptides, 0.0, 
                                     style="full", write_requant=False)
        os.remove(tmpfile)

        tmpfile = "tmp.output.tsv"
        helper.write_out_matrix_file(tmpfile, self.exp.runs, self.multipeptides, 0.0, 
                                     style="full", write_requant=False)
        os.remove(tmpfile)

        tmpfile = "tmp.output.xls"
        helper.write_out_matrix_file(tmpfile, self.exp.runs, self.multipeptides, 0.0, 
                                     style="full", write_requant=False)
        os.remove(tmpfile)
        tmpfile = "tmp.output.xlsx"
        helper.write_out_matrix_file(tmpfile, self.exp.runs, self.multipeptides, 0.0, 
                                     style="full", write_requant=False)
        os.remove(tmpfile)

    def test_matrix_out_2(self):
        """Test the output matrix writers"""

        import msproteomicstoolslib.algorithms.alignment.AlignmentHelper as helper

        runs = self.exp2.runs
        multipeptides = self.multipeptides2

        tmpfile = "tmp.output.csv"
        helper.write_out_matrix_file(tmpfile, runs, multipeptides, 0.0, 
                                     style="full", write_requant=False)
        os.remove(tmpfile)

        tmpfile = "tmp.output.tsv"
        helper.write_out_matrix_file(tmpfile, runs, multipeptides, 0.0, 
                                     style="full", write_requant=False)
        os.remove(tmpfile)

        tmpfile = "tmp.output.xls"
        helper.write_out_matrix_file(tmpfile, runs, multipeptides, 0.0, 
                                     style="full", write_requant=False)
        os.remove(tmpfile)
        tmpfile = "tmp.output.xlsx"
        helper.write_out_matrix_file(tmpfile, runs, multipeptides, 0.0, 
                                     style="full", write_requant=False)
        os.remove(tmpfile)

    def test_matrix_out_3(self):
        """Test the output matrix writers"""

        import msproteomicstoolslib.algorithms.alignment.AlignmentHelper as helper

        runs = self.exp2.runs
        multipeptides = self.multipeptides2[0]  

        # Try to mess up the assumption of one selected peakgroup per run
        pep1 = multipeptides.getAllPeptides()[0]
        pep2 = multipeptides.getAllPeptides()[0]
        for pg in pep1.peakgroups:
            pg.select_this_peakgroup()
        for pg in pep2.peakgroups:
            pg.peptide = pep1
            pg.select_this_peakgroup()

        tmpfile = "tmp.output.csv"
        self.assertRaises(Exception, helper.write_out_matrix_file, tmpfile, runs, [ multipeptides ] , 0.0, 
                                     style="full", write_requant=False)
        os.remove(tmpfile)