Example no. 1
0
 def _generate_read_alignment_stats(
     self,
     lib_names,
     result_bam_paths,
     unaligned_reads_paths,
     output_stats_path,
 ):
     """Manage the generation of alingment statistics."""
     raw_stat_data_writer = RawStatDataWriter(pretty=True)
     read_files_and_jobs = {}
     if not self._file_needs_to_be_created(output_stats_path):
         return
     with concurrent.futures.ProcessPoolExecutor(
             max_workers=self._args.processes) as executor:
         for (
                 lib_name,
                 read_alignment_bam_path,
                 unaligned_reads_path,
         ) in zip(lib_names, result_bam_paths, unaligned_reads_paths):
             read_aligner_stats = ReadAlignerStats()
             read_files_and_jobs[lib_name] = executor.submit(
                 read_aligner_stats.count,
                 read_alignment_bam_path,
                 unaligned_reads_path,
             )
     # Evaluate thread outcome
     self._check_job_completeness(read_files_and_jobs.values())
     read_files_and_stats = dict([
         (lib_name, job.result())
         for lib_name, job in read_files_and_jobs.items()
     ])
     raw_stat_data_writer.write(read_files_and_stats, output_stats_path)
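Every variant in this listing defers error handling to `self._check_job_completeness` before any `job.result()` call. The helper itself is not shown; the following is a minimal sketch of what such a check could look like, assuming it simply re-raises the first exception raised in a worker process (the project's actual helper may behave differently):

    import concurrent.futures

    def check_job_completeness(jobs):
        # Iterate over the futures as they finish and re-raise the first
        # exception raised inside a worker process, so a failure does not
        # surface later as an opaque error from job.result().
        for job in concurrent.futures.as_completed(jobs):
            exception = job.exception()
            if exception is not None:
                raise exception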
Example no. 2
0
 def _evaluet_job_and_generate_stat_file(self, read_files_and_jobs):
     raw_stat_data_writer = RawStatDataWriter(pretty=True)
     # Evaluate thread outcome
     self._check_job_completeness(read_files_and_jobs.values())
     if not self._file_needs_to_be_created(
             self._paths.read_processing_stats_path):
         return
     # Create a dict of the read file names and the processing
     # counting results
     read_files_and_stats = dict(
         [(lib_name, job.result()) for lib_name, job in
          read_files_and_jobs.items()])
     raw_stat_data_writer.write(
         read_files_and_stats, self._paths.read_processing_stats_path)
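Examples 1, 2, and 4 guard the whole computation with `self._file_needs_to_be_created(...)` and return early when the statistics file is already on disk. A minimal sketch of such a guard, assuming it only checks for the file's existence; the real helper may additionally honour a force/overwrite option:

    import os

    def file_needs_to_be_created(file_path, force=False):
        # Return True when the output is missing or regeneration is forced;
        # the callers above skip all job submission and writing otherwise.
        if force:
            return True
        if os.path.exists(file_path):
            print(f"File {file_path} exists - skipping its generation.")
            return False
        return True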
Example no. 3
0
 def generate_read_alignment_stats(
         self, filenames, BAM_files, output_path, output_file):
     """Manage the generation of alingment statistics."""
     raw_stat_data_writer = RawStatDataWriter(pretty=True)
     read_files_and_jobs = {}
     with concurrent.futures.ProcessPoolExecutor(
             max_workers=self._args.processes) as executor:
         for (filename, BAM_file) in zip(
                 filenames, BAM_files):
             read_aligner_stats = ReadAlignerStats()
             read_files_and_jobs[filename] = executor.submit(
                 read_aligner_stats.count, BAM_file, "NA")
     # Evaluate thread outcome
     self._helpers.check_job_completeness(read_files_and_jobs.values())
     read_files_and_stats = dict(
         [(filename, job.result())
          for filename, job in read_files_and_jobs.items()])
     raw_stat_data_writer.write(
         read_files_and_stats, output_file)
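Because `ProcessPoolExecutor` runs each submitted call in a separate worker process, the callable, its arguments, and its return value must all be picklable. The toy stand-in below (every name in it is a placeholder, not part of the original code) shows the same submit-then-collect pattern in a self-contained form:

    import concurrent.futures

    class ReadAlignerStatsStub:
        """Toy stand-in for ReadAlignerStats, used only to show the pattern."""

        def count(self, bam_path, unaligned_path):
            # Return a plain dict: the result has to travel back from the
            # worker process via pickling.
            return {"bam": bam_path, "unaligned": unaligned_path,
                    "no_of_aligned_reads": 0}

    if __name__ == "__main__":
        jobs = {}
        with concurrent.futures.ProcessPoolExecutor(max_workers=2) as executor:
            for lib_name in ["lib_1", "lib_2"]:
                jobs[lib_name] = executor.submit(
                    ReadAlignerStatsStub().count, lib_name + ".bam", "NA")
        # Leaving the with-block joins the pool, so every future is done here.
        print({lib_name: job.result() for lib_name, job in jobs.items()})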
Example no. 4
0
 def _generate_read_alignment_stats(
         self, lib_names, result_bam_paths, unaligned_reads_paths,
         output_stats_path):
     """Manage the generation of alingment statistics."""
     raw_stat_data_writer = RawStatDataWriter(pretty=True)
     read_files_and_jobs = {}
     if not self._file_needs_to_be_created(output_stats_path):
         return
     with concurrent.futures.ProcessPoolExecutor(
             max_workers=self._args.processes) as executor:
         for (lib_name, read_alignment_bam_path,
              unaligned_reads_path) in zip(
                 lib_names, result_bam_paths, unaligned_reads_paths):
             read_aligner_stats = ReadAlignerStats()
             read_files_and_jobs[lib_name] = executor.submit(
                 read_aligner_stats.count, read_alignment_bam_path,
                 unaligned_reads_path)
     # Evaluate thread outcome
     self._check_job_completeness(read_files_and_jobs.values())
     read_files_and_stats = dict(
         [(lib_name, job.result())
          for lib_name, job in read_files_and_jobs.items()])
     raw_stat_data_writer.write(read_files_and_stats, output_stats_path)
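All of the examples finish by handing the collected dictionary to `RawStatDataWriter(pretty=True).write(...)`. That class is not part of this listing; the sketch below assumes it simply serialises the statistics as indented JSON, which may differ from the writer the examples actually use:

    import json

    class RawStatDataWriter:
        """Hypothetical writer: dump per-library statistics as JSON."""

        def __init__(self, pretty=False):
            # pretty=True is assumed to mean human-readable indentation.
            self._indent = 4 if pretty else None

        def write(self, stats, output_path):
            with open(output_path, "w") as output_fh:
                json.dump(stats, output_fh, indent=self._indent, sort_keys=True)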