    def test_load(self):
        expected_records = [dict(lane=1,
                                 tile=2,
                                 cycle=3,
                                 error_rate=0.5,
                                 num_0_errors=4,
                                 num_1_error=5,
                                 num_2_errors=6,
                                 num_3_errors=7,
                                 num_4_errors=8)]

        records = list(read_errors(self.sample_stream))

        self.assertEqual(expected_records, records)
Example #2
    def test_load(self):
        expected_records = [
            dict(lane=1,
                 tile=2,
                 cycle=3,
                 error_rate=0.5,
                 num_0_errors=4,
                 num_1_error=5,
                 num_2_errors=6,
                 num_3_errors=7,
                 num_4_errors=8)
        ]

        records = list(read_errors(self.sample_stream))

        self.assertEqual(expected_records, records)
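The two test_load examples above rely on a sample_stream fixture that the
snippets do not show. Based on the pack call in the test_new_version examples
further down, here is a minimal setUp sketch; the version-3 header bytes
(3, 30) and the struct layout are assumptions inferred from those tests:

    from io import BytesIO
    from struct import pack
    from unittest import TestCase

    class ErrorMetricsTest(TestCase):
        def setUp(self):
            # Assumed header (version=3, 30-byte records), then one record:
            # lane, tile, cycle, error rate, and the five error counts.
            self.sample_data = [3, 30,
                                1, 2, 3, 0.5, 4, 5, 6, 7, 8]
            self.sample_stream = BytesIO(pack('<bbHHHfLLLLL',
                                              *self.sample_data))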
Example #3
def summarize_run(args, json):
    """ Summarize the run data from the InterOp folder.

    Writes some summary files.
    :return: a dictionary with summary values.
    """
    read_lengths = [json.read_length1,
                    json.index_length1,
                    json.index_length2,
                    json.read_length2]
    summary = {}

    interop_path = os.path.join(args.data_path,
                                'input',
                                'runs',
                                json.run_id,
                                'InterOp')
    phix_path = os.path.join(interop_path, 'ErrorMetricsOut.bin')
    quality_path = os.path.join(args.data_path, 'scratch', 'quality.csv')
    bad_cycles_path = os.path.join(args.data_path, 'scratch', 'bad_cycles.csv')
    summary_path = build_app_result_path(args.data_path,
                                         json,
                                         json.samples[0],
                                         suffix='_QC')
    makedirs(summary_path)
    bad_tiles_path = os.path.join(summary_path, 'bad_tiles.csv')
    with open(phix_path, 'rb') as phix, open(quality_path, 'w') as quality:
        records = error_metrics_parser.read_errors(phix)
        error_metrics_parser.write_phix_csv(quality,
                                            records,
                                            read_lengths,
                                            summary)
    with open(quality_path, 'r') as quality, \
            open(bad_cycles_path, 'w') as bad_cycles, \
            open(bad_tiles_path, 'w') as bad_tiles:
        report_bad_cycles(quality, bad_cycles, bad_tiles)

    quality_metrics_path = os.path.join(interop_path, 'QMetricsOut.bin')
    quality_metrics_parser.summarize_quality(quality_metrics_path,
                                             summary,
                                             read_lengths)

    tile_metrics_path = os.path.join(interop_path, 'TileMetricsOut.bin')
    summarize_tiles(tile_metrics_path, summary)
    return summary
Example #4
    def upload_filter_quality(self, folder_watcher):
        read_sizes = parse_read_sizes(folder_watcher.run_folder /
                                      "RunInfo.xml")
        read_lengths = [
            read_sizes.read1, read_sizes.index1, read_sizes.index2,
            read_sizes.read2
        ]
        error_path = folder_watcher.run_folder / "InterOp/ErrorMetricsOut.bin"
        quality_csv = StringIO()
        with error_path.open('rb') as error_file:
            records = error_metrics_parser.read_errors(error_file)
            error_metrics_parser.write_phix_csv(quality_csv, records,
                                                read_lengths)
        quality_csv_bytes = BytesIO()
        quality_csv_bytes.write(quality_csv.getvalue().encode('utf8'))
        quality_csv_bytes.seek(0)
        folder_watcher.quality_dataset = self.find_or_upload_dataset(
            quality_csv_bytes, folder_watcher.run_name + '_quality.csv',
            'Error rates for {} run.'.format(folder_watcher.run_name))
Example #5
    def upload_filter_quality(self, folder_watcher):
        read_sizes = parse_read_sizes(folder_watcher.run_folder / "RunInfo.xml")
        read_lengths = [read_sizes.read1,
                        read_sizes.index1,
                        read_sizes.index2,
                        read_sizes.read2]
        error_path = folder_watcher.run_folder / "InterOp/ErrorMetricsOut.bin"
        quality_csv = StringIO()
        with error_path.open('rb') as error_file:
            records = error_metrics_parser.read_errors(error_file)
            error_metrics_parser.write_phix_csv(quality_csv,
                                                records,
                                                read_lengths)
        quality_csv_bytes = BytesIO()
        quality_csv_bytes.write(quality_csv.getvalue().encode('utf8'))
        quality_csv_bytes.seek(0)
        folder_watcher.quality_dataset = self.find_or_upload_dataset(
            quality_csv_bytes,
            folder_watcher.run_name + '_quality.csv',
            'Error rates for {} run.'.format(folder_watcher.run_name))
Example #6
    def test_new_version(self):
        self.sample_data[:2] = [4, 31]
        self.sample_data.append(42)
        self.sample_data.extend(self.sample_data[2:])
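        # Assumed layout, inferred from the test data: little-endian ('<'),
        # a two-byte file header ('bb': version=4, record size=31 bytes),
        # then per record 'HHH' lane/tile/cycle, 'f' error rate, 'LLLLL' the
        # five error counts, and a trailing 'b' for the extra version-4 field.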
        format_string = '<bbHHHfLLLLLbHHHfLLLLLb'
        self.sample_stream = BytesIO(pack(format_string, *self.sample_data))
        expected_records = [dict(lane=1,
                                 tile=2,
                                 cycle=3,
                                 error_rate=0.5,
                                 num_0_errors=4,
                                 num_1_error=5,
                                 num_2_errors=6,
                                 num_3_errors=7,
                                 num_4_errors=8)] * 2

        records = list(read_errors(self.sample_stream))

        self.maxDiff = 1000
        self.assertEqual(expected_records, records)
Example #7
    def test_new_version(self):
        self.sample_data[:2] = [4, 31]
        self.sample_data.append(42)
        self.sample_data.extend(self.sample_data[2:])
        format_string = '<bbHHHfLLLLLbHHHfLLLLLb'
        # pack() returns bytes, so wrap it in a BytesIO rather than a StringIO.
        self.sample_stream = BytesIO(pack(format_string, *self.sample_data))
        expected_records = [
            dict(lane=1,
                 tile=2,
                 cycle=3,
                 error_rate=0.5,
                 num_0_errors=4,
                 num_1_error=5,
                 num_2_errors=6,
                 num_3_errors=7,
                 num_4_errors=8)
        ] * 2

        records = list(read_errors(self.sample_stream))

        self.maxDiff = 1000
        self.assertEqual(expected_records, records)
Example #8
def summarize_run(args, json):
    """ Summarize the run data from the InterOp folder.

    Writes some summary files.
    :return: a dictionary with summary values.
    """
    read_lengths = [
        json.read_length1, json.index_length1, json.index_length2,
        json.read_length2
    ]
    summary = {}

    interop_path = os.path.join(args.data_path, 'input', 'runs', json.run_id,
                                'InterOp')
    phix_path = os.path.join(interop_path, 'ErrorMetricsOut.bin')
    quality_path = os.path.join(args.data_path, 'scratch', 'quality.csv')
    bad_cycles_path = os.path.join(args.data_path, 'scratch', 'bad_cycles.csv')
    summary_path = build_app_result_path(args.data_path,
                                         json,
                                         json.samples[0],
                                         suffix='_QC')
    makedirs(summary_path)
    bad_tiles_path = os.path.join(summary_path, 'bad_tiles.csv')
    with open(phix_path, 'rb') as phix, open(quality_path, 'w') as quality:
        records = error_metrics_parser.read_errors(phix)
        error_metrics_parser.write_phix_csv(quality, records, read_lengths,
                                            summary)
    with open(quality_path, 'r') as quality, \
            open(bad_cycles_path, 'w') as bad_cycles, \
            open(bad_tiles_path, 'w') as bad_tiles:
        report_bad_cycles(quality, bad_cycles, bad_tiles)

    quality_metrics_path = os.path.join(interop_path, 'QMetricsOut.bin')
    quality_metrics_parser.summarize_quality(quality_metrics_path, summary,
                                             read_lengths)

    tile_metrics_path = os.path.join(interop_path, 'TileMetricsOut.bin')
    summarize_tiles(tile_metrics_path, summary)
    return summary
Example #9
def summarize_run(args, run_json):
    """ Summarize the run data from the InterOp folder.

    Writes some summary files.
    :return: a dictionary with summary values.
    """
    read_lengths = [
        run_json.read_length1, run_json.index_length1, run_json.index_length2,
        run_json.read_length2
    ]
    summary = {}

    has_error_metrics = run_json.has_runinfo

    if has_error_metrics:
        interop_path = os.path.join(args.data_path, 'input', 'runs',
                                    run_json.run_id, 'InterOp')
        phix_path = os.path.join(interop_path, 'ErrorMetricsOut.bin')
        quality_path = os.path.join(args.data_path, 'scratch', 'quality.csv')
        bad_cycles_path = os.path.join(args.data_path, 'scratch',
                                       'bad_cycles.csv')
        bad_tiles_path = os.path.join(args.qc_path, 'bad_tiles.csv')
        with open(phix_path, 'rb') as phix, open(quality_path, 'w') as quality:
            records = error_metrics_parser.read_errors(phix)
            error_metrics_parser.write_phix_csv(quality, records, read_lengths,
                                                summary)
        with open(quality_path, 'r') as quality, \
                open(bad_cycles_path, 'w') as bad_cycles, \
                open(bad_tiles_path, 'w') as bad_tiles:
            report_bad_cycles(quality, bad_cycles, bad_tiles)

        quality_metrics_path = os.path.join(interop_path, 'QMetricsOut.bin')
        quality_metrics_parser.summarize_quality(quality_metrics_path, summary,
                                                 read_lengths)

        tile_metrics_path = os.path.join(interop_path, 'TileMetricsOut.bin')
        summarize_tiles(tile_metrics_path, summary)
    return summary
Example #10
def summarize_run(run_info):
    """ Summarize the run data from the InterOp folder.

    Writes some summary files.
    :param RunInfo run_info: details of the run
    :return: a dictionary with summary values.
    """
    summary = {}

    if run_info.read_sizes is not None:
        read_lengths = [run_info.read_sizes.read1,
                        run_info.read_sizes.index1,
                        run_info.read_sizes.index2,
                        run_info.read_sizes.read2]
        phix_path = os.path.join(run_info.interop_path, 'ErrorMetricsOut.bin')
        with open(phix_path, 'rb') as phix, \
                open(run_info.quality_csv, 'w') as quality:
            records = error_metrics_parser.read_errors(phix)
            error_metrics_parser.write_phix_csv(quality,
                                                records,
                                                read_lengths,
                                                summary)
        with open(run_info.quality_csv) as quality, \
                open(run_info.bad_cycles_csv, 'w') as bad_cycles, \
                open(run_info.bad_tiles_csv, 'w') as bad_tiles:
            report_bad_cycles(quality, bad_cycles, bad_tiles)

        quality_metrics_path = os.path.join(run_info.interop_path,
                                            'QMetricsOut.bin')
        quality_metrics_parser.summarize_quality(quality_metrics_path,
                                                 summary,
                                                 read_lengths)

        tile_metrics_path = os.path.join(run_info.interop_path,
                                         'TileMetricsOut.bin')
        summarize_tiles(tile_metrics_path, summary)
    return summary
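All of the summarize_run variants above share the same core pattern: read the
binary ErrorMetricsOut.bin records, write the PhiX quality CSV, then scan that
CSV for bad cycles and tiles. A minimal standalone sketch of the first two
steps, assuming error_metrics_parser is importable as shown in the examples:

    import os

    import error_metrics_parser


    def write_quality_csv(interop_path, quality_csv_path, read_lengths):
        """Convert ErrorMetricsOut.bin into a per-cycle quality CSV."""
        summary = {}  # populated with summary values, as in the examples above
        phix_path = os.path.join(interop_path, 'ErrorMetricsOut.bin')
        with open(phix_path, 'rb') as phix, \
                open(quality_csv_path, 'w') as quality:
            records = error_metrics_parser.read_errors(phix)
            error_metrics_parser.write_phix_csv(quality,
                                                records,
                                                read_lengths,
                                                summary)
        return summary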