Example #1
    def _test(self, read_indexes, debug_file_prefix=None):
        read_indexes = reversed(read_indexes)
        simple_filename1 = self.filename1 + '_simple.fastq'
        self.write_simple_fastq(simple_filename1, read_indexes)
        workdir = os.path.dirname(self.filename1)
        os.chdir(workdir)
        simple_filename2 = get_reverse_filename(simple_filename1)
        censored_filename1 = os.path.join(workdir, 'rerun.censored1.fastq')
        censored_filename2 = os.path.join(workdir, 'rerun.censored2.fastq')
        trimmed_filename1 = os.path.join(workdir, 'rerun.trimmed1.fastq')
        trimmed_filename2 = os.path.join(workdir, 'rerun.trimmed2.fastq')
        prelim_censored_filename = os.path.join(workdir, 'rerun_censored.prelim.csv')
        prelim_trimmed_filename = os.path.join(workdir, 'rerun_trimmed.prelim.csv')
        with open(self.bad_cycles_filename, 'r') as bad_cycles:
            bad_cycles = list(csv.DictReader(bad_cycles))
        with open(simple_filename1, 'r') as simple1, \
                open(censored_filename1, 'w') as censored1:
            censor(simple1, bad_cycles, censored1, use_gzip=False)
        with open(simple_filename2, 'r') as simple2, \
                open(censored_filename2, 'w') as censored2:
            censor(simple2, bad_cycles, censored2, use_gzip=False)
        with open(prelim_censored_filename, 'w+') as prelim_censored_csv, \
                open(prelim_trimmed_filename, 'w+') as prelim_trimmed_csv:
            prelim_map(censored_filename1,
                       censored_filename2,
                       prelim_censored_csv,
                       nthreads=BOWTIE_THREADS)
            trim((simple_filename1, simple_filename2),
                 self.bad_cycles_filename,
                 (trimmed_filename1, trimmed_filename2),
                 use_gzip=False)
            prelim_map(trimmed_filename1,
                       trimmed_filename2,
                       prelim_trimmed_csv,
                       nthreads=BOWTIE_THREADS)
            prelim_censored_csv.seek(0)
            censored_map_count = self.count_mapped(prelim_censored_csv)
            prelim_trimmed_csv.seek(0)
            trimmed_map_count = self.count_mapped(prelim_trimmed_csv)

        return self.get_result(censored_map_count, trimmed_map_count)
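The test above calls get_reverse_filename to find the matching reverse-read file. That helper is not shown on this page; a minimal sketch, assuming the paired FASTQ files follow the _R1_/_R2_ naming convention that Example #6 relies on:

def get_reverse_filename(fastq1_filename):
    # Derive the reverse-read (R2) file name from the forward-read (R1)
    # name, assuming the pair differs only in the _R1_/_R2_ tag.
    return fastq1_filename.replace('_R1_', '_R2_')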
Example #3
def test_trim(tmpdir):
    read1_content = 'TATCTACTAACTGTCGGTCTAC'
    read2_content = reverse_and_complement(read1_content)
    expected1 = build_fastq(read1_content)
    expected2 = build_fastq(read2_content)

    tmp_path = Path(str(tmpdir))
    fastq1_path = tmp_path / 'read1.fastq'
    fastq2_path = tmp_path / 'read2.fastq'
    trimmed1_path = tmp_path / 'trimmed1.fastq'
    trimmed2_path = tmp_path / 'trimmed2.fastq'
    fastq1_path.write_text(expected1)
    fastq2_path.write_text(expected2)

    trim([fastq1_path, fastq2_path],
         'no_bad_cycles.csv',
         [str(trimmed1_path), str(trimmed2_path)],
         use_gzip=False)

    trimmed1 = trimmed1_path.read_text()
    trimmed2 = trimmed2_path.read_text()
    assert trimmed1 == expected1
    assert trimmed2 == expected2
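build_fastq and reverse_and_complement are helpers defined elsewhere in the test module. A minimal sketch of what they could look like, assuming a single-record FASTQ layout and a constant placeholder quality string (both assumptions, not taken from this page):

def reverse_and_complement(sequence):
    # Reverse-complement a DNA sequence: reverse it, then swap each
    # base for its Watson-Crick partner.
    complement = {'A': 'T', 'T': 'A', 'G': 'C', 'C': 'G'}
    return ''.join(complement[base] for base in reversed(sequence))


def build_fastq(sequence, name='read1'):
    # Lay out one FASTQ record: @header, sequence, separator line,
    # and a placeholder quality score for every base.
    return '@{}\n{}\n+\n{}\n'.format(name, sequence, 'A' * len(sequence))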
Example #4
    def process(self,
                pssm,
                excluded_seeds=(),
                excluded_projects=(),
                force_gzip=False,
                use_denovo=False):
        """ Process a single sample.

        :param pssm: the pssm library for running G2P analysis
        :param excluded_seeds: seeds to exclude from mapping
        :param excluded_projects: project codes to exclude from reporting
        :param bool force_gzip: treat FASTQ files as gzipped, even when they
            don't end in .gz
        :param bool use_denovo: True if de novo assembly should be used,
            instead of bowtie2 mapping against references.
        """
        logger.info('Processing %s (%r).', self, self.fastq1)
        scratch_path = self.get_scratch_path()
        makedirs(scratch_path)
        use_gzip = force_gzip or self.fastq1.endswith('.gz')

        sample_info = self.load_sample_info()

        with open(self.read_summary_csv, 'w') as read_summary:
            trim((self.fastq1, self.fastq2),
                 self.bad_cycles_csv,
                 (self.trimmed1_fastq, self.trimmed2_fastq),
                 summary_file=read_summary,
                 use_gzip=use_gzip,
                 skip=self.skip,
                 project_code=sample_info.get('project'))

        if use_denovo:
            logger.info('Running merge_for_entropy on %s.', self)
            with open(self.read_entropy_csv, 'w') as read_entropy_csv:
                merge_for_entropy(self.trimmed1_fastq,
                                  self.trimmed2_fastq,
                                  read_entropy_csv,
                                  scratch_path)

            write_merge_lengths_plot(self.read_entropy_csv,
                                     self.merge_lengths_svg)

        logger.info('Running fastq_g2p on %s.', self)
        with open(self.trimmed1_fastq) as fastq1, \
                open(self.trimmed2_fastq) as fastq2, \
                open(self.g2p_csv, 'w') as g2p_csv, \
                open(self.g2p_summary_csv, 'w') as g2p_summary_csv, \
                open(self.g2p_unmapped1_fastq, 'w') as g2p_unmapped1, \
                open(self.g2p_unmapped2_fastq, 'w') as g2p_unmapped2, \
                open(self.g2p_aligned_csv, 'w') as g2p_aligned_csv, \
                open(self.merged_contigs_csv, 'w') as merged_contigs_csv:

            fastq_g2p(pssm=pssm,
                      fastq1=fastq1,
                      fastq2=fastq2,
                      g2p_csv=g2p_csv,
                      g2p_summary_csv=g2p_summary_csv,
                      unmapped1=g2p_unmapped1,
                      unmapped2=g2p_unmapped2,
                      aligned_csv=g2p_aligned_csv,
                      min_count=DEFAULT_MIN_COUNT,
                      min_valid=MIN_VALID,
                      min_valid_percent=MIN_VALID_PERCENT,
                      merged_contigs_csv=merged_contigs_csv)

        if use_denovo:
            self.run_denovo(excluded_seeds)
        else:
            self.run_mapping(excluded_seeds)

        logger.info('Running sam2aln on %s.', self)
        with open(self.remap_csv) as remap_csv, \
                open(self.aligned_csv, 'w') as aligned_csv, \
                open(self.conseq_ins_csv, 'w') as conseq_ins_csv, \
                open(self.failed_csv, 'w') as failed_csv, \
                open(self.clipping_csv, 'w') as clipping_csv:

            sam2aln(remap_csv,
                    aligned_csv,
                    conseq_ins_csv,
                    failed_csv,
                    clipping_csv=clipping_csv)

        logger.info('Running aln2counts on %s.', self)
        if use_denovo:
            contigs_path = self.contigs_csv
        else:
            contigs_path = os.devnull
        with open(self.aligned_csv) as aligned_csv, \
                open(self.g2p_aligned_csv) as g2p_aligned_csv, \
                open(self.clipping_csv) as clipping_csv, \
                open(self.conseq_ins_csv) as conseq_ins_csv, \
                open(self.remap_conseq_csv) as remap_conseq_csv, \
                open(contigs_path) as contigs_csv, \
                open(self.nuc_csv, 'w') as nuc_csv, \
                open(self.nuc_detail_csv, 'w') as nuc_detail_csv, \
                open(self.amino_csv, 'w') as amino_csv, \
                open(self.amino_detail_csv, 'w') as amino_detail_csv, \
                open(self.coord_ins_csv, 'w') as coord_ins_csv, \
                open(self.conseq_csv, 'w') as conseq_csv, \
                open(self.conseq_region_csv, 'w') as conseq_region_csv, \
                open(self.failed_align_csv, 'w') as failed_align_csv, \
                open(self.coverage_summary_csv, 'w') as coverage_summary_csv, \
                open(self.genome_coverage_csv, 'w') as genome_coverage_csv, \
                open(self.conseq_all_csv, "w") as conseq_all_csv, \
                open(self.minimap_hits_csv, "w") as minimap_hits_csv:

            if not use_denovo:
                for f in (amino_detail_csv, nuc_detail_csv):
                    f.close()
                    os.remove(f.name)
                amino_detail_csv = nuc_detail_csv = None
            aln2counts(aligned_csv,
                       nuc_csv,
                       amino_csv,
                       coord_ins_csv,
                       conseq_csv,
                       failed_align_csv,
                       coverage_summary_csv=coverage_summary_csv,
                       clipping_csv=clipping_csv,
                       conseq_ins_csv=conseq_ins_csv,
                       g2p_aligned_csv=g2p_aligned_csv,
                       remap_conseq_csv=remap_conseq_csv,
                       conseq_region_csv=conseq_region_csv,
                       amino_detail_csv=amino_detail_csv,
                       nuc_detail_csv=nuc_detail_csv,
                       genome_coverage_csv=genome_coverage_csv,
                       contigs_csv=contigs_csv,
                       conseq_all_csv=conseq_all_csv,
                       minimap_hits_csv=minimap_hits_csv)

        logger.info('Running coverage_plots on %s.', self)
        os.makedirs(self.coverage_maps)
        with open(self.amino_csv) as amino_csv, \
                open(self.coverage_scores_csv, 'w') as coverage_scores_csv:
            coverage_plot(amino_csv,
                          coverage_scores_csv,
                          coverage_maps_path=self.coverage_maps,
                          coverage_maps_prefix=self.name,
                          excluded_projects=excluded_projects)

        with open(self.genome_coverage_csv) as genome_coverage_csv, \
                open(self.minimap_hits_csv) as minimap_hits_csv:
            if not use_denovo:
                minimap_hits_csv = None
            plot_genome_coverage(genome_coverage_csv,
                                 minimap_hits_csv,
                                 self.genome_coverage_svg)

        logger.info('Running cascade_report on %s.', self)
        with open(self.g2p_summary_csv) as g2p_summary_csv, \
                open(self.remap_counts_csv) as remap_counts_csv, \
                open(self.aligned_csv) as aligned_csv, \
                open(self.cascade_csv, 'w') as cascade_csv:
            cascade_report = CascadeReport(cascade_csv)
            cascade_report.g2p_summary_csv = g2p_summary_csv
            cascade_report.remap_counts_csv = remap_counts_csv
            cascade_report.aligned_csv = aligned_csv
            cascade_report.generate()
        logger.info('Finished sample %s.', self)
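A hypothetical call site for this method; the Sample constructor arguments and the load_pssm helper are illustrative names, not taken from this page:

# Illustrative only: the 'Sample' wiring and 'load_pssm' are assumptions.
pssm = load_pssm()
sample = Sample(fastq1='1234A_S1_L001_R1_001.fastq.gz',
                fastq2='1234A_S1_L001_R2_001.fastq.gz')
sample.process(pssm, use_denovo=True)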
Example #5
    def process(self,
                pssm,
                excluded_seeds=(),
                excluded_projects=(),
                force_gzip=False):
        """ Process a single sample.

        :param pssm: the pssm library for running G2P analysis
        :param excluded_seeds: seeds to exclude from mapping
        :param excluded_projects: project codes to exclude from reporting
        :param bool force_gzip: treat FASTQ files as gzipped, even when they
            don't end in .gz
        """
        logger.info('Processing %s (%r).', self, self.fastq1)
        scratch_path = os.path.dirname(self.prelim_csv)
        os.mkdir(scratch_path)
        use_gzip = force_gzip or self.fastq1.endswith('.gz')

        with open(self.read_summary_csv, 'w') as read_summary:
            trim((self.fastq1, self.fastq2),
                 self.bad_cycles_csv,
                 (self.trimmed1_fastq, self.trimmed2_fastq),
                 summary_file=read_summary,
                 use_gzip=use_gzip)

        logger.info('Running fastq_g2p on %s.', self)
        with open(self.trimmed1_fastq) as fastq1, \
                open(self.trimmed2_fastq) as fastq2, \
                open(self.g2p_csv, 'w') as g2p_csv, \
                open(self.g2p_summary_csv, 'w') as g2p_summary_csv, \
                open(self.g2p_unmapped1_fastq, 'w') as g2p_unmapped1, \
                open(self.g2p_unmapped2_fastq, 'w') as g2p_unmapped2, \
                open(self.g2p_aligned_csv, 'w') as g2p_aligned_csv:

            fastq_g2p(pssm=pssm,
                      fastq1=fastq1,
                      fastq2=fastq2,
                      g2p_csv=g2p_csv,
                      g2p_summary_csv=g2p_summary_csv,
                      unmapped1=g2p_unmapped1,
                      unmapped2=g2p_unmapped2,
                      aligned_csv=g2p_aligned_csv,
                      min_count=DEFAULT_MIN_COUNT,
                      min_valid=MIN_VALID,
                      min_valid_percent=MIN_VALID_PERCENT)

        logger.info('Running prelim_map on %s.', self)
        with open(self.prelim_csv, 'w') as prelim_csv:
            prelim_map(self.g2p_unmapped1_fastq,
                       self.g2p_unmapped2_fastq,
                       prelim_csv,
                       work_path=scratch_path,
                       excluded_seeds=excluded_seeds)

        logger.info('Running remap on %s.', self)
        if self.debug_remap:
            debug_file_prefix = os.path.join(scratch_path, 'debug')
        else:
            debug_file_prefix = None
        with open(self.prelim_csv) as prelim_csv, \
                open(self.remap_csv, 'w') as remap_csv, \
                open(self.remap_counts_csv, 'w') as counts_csv, \
                open(self.remap_conseq_csv, 'w') as conseq_csv, \
                open(self.unmapped1_fastq, 'w') as unmapped1, \
                open(self.unmapped2_fastq, 'w') as unmapped2:

            remap(self.g2p_unmapped1_fastq,
                  self.g2p_unmapped2_fastq,
                  prelim_csv,
                  remap_csv,
                  counts_csv,
                  conseq_csv,
                  unmapped1,
                  unmapped2,
                  scratch_path,
                  debug_file_prefix=debug_file_prefix)

        logger.info('Running sam2aln on %s.', self)
        with open(self.remap_csv) as remap_csv, \
                open(self.aligned_csv, 'w') as aligned_csv, \
                open(self.conseq_ins_csv, 'w') as conseq_ins_csv, \
                open(self.failed_csv, 'w') as failed_csv, \
                open(self.clipping_csv, 'w') as clipping_csv:

            sam2aln(remap_csv,
                    aligned_csv,
                    conseq_ins_csv,
                    failed_csv,
                    clipping_csv=clipping_csv)

        logger.info('Running aln2counts on %s.', self)
        with open(self.aligned_csv) as aligned_csv, \
                open(self.g2p_aligned_csv) as g2p_aligned_csv, \
                open(self.clipping_csv) as clipping_csv, \
                open(self.conseq_ins_csv) as conseq_ins_csv, \
                open(self.remap_conseq_csv) as remap_conseq_csv, \
                open(self.nuc_csv, 'w') as nuc_csv, \
                open(self.amino_csv, 'w') as amino_csv, \
                open(self.coord_ins_csv, 'w') as coord_ins_csv, \
                open(self.conseq_csv, 'w') as conseq_csv, \
                open(self.conseq_region_csv, 'w') as conseq_region_csv, \
                open(self.failed_align_csv, 'w') as failed_align_csv, \
                open(self.coverage_summary_csv, 'w') as coverage_summary_csv:

            aln2counts(aligned_csv,
                       nuc_csv,
                       amino_csv,
                       coord_ins_csv,
                       conseq_csv,
                       failed_align_csv,
                       coverage_summary_csv=coverage_summary_csv,
                       clipping_csv=clipping_csv,
                       conseq_ins_csv=conseq_ins_csv,
                       g2p_aligned_csv=g2p_aligned_csv,
                       remap_conseq_csv=remap_conseq_csv,
                       conseq_region_csv=conseq_region_csv)

        logger.info('Running coverage_plots on %s.', self)
        os.makedirs(self.coverage_maps)
        with open(self.amino_csv) as amino_csv, \
                open(self.coverage_scores_csv, 'w') as coverage_scores_csv:
            coverage_plot(amino_csv,
                          coverage_scores_csv,
                          coverage_maps_path=self.coverage_maps,
                          coverage_maps_prefix=self.name,
                          excluded_projects=excluded_projects)

        logger.info('Running cascade_report on %s.', self)
        with open(self.g2p_summary_csv) as g2p_summary_csv, \
                open(self.remap_counts_csv) as remap_counts_csv, \
                open(self.aligned_csv) as aligned_csv, \
                open(self.cascade_csv, 'w') as cascade_csv:
            cascade_report = CascadeReport(cascade_csv)
            cascade_report.g2p_summary_csv = g2p_summary_csv
            cascade_report.remap_counts_csv = remap_counts_csv
            cascade_report.aligned_csv = aligned_csv
            cascade_report.generate()
        logger.info('Finished sample %s.', self)
Example #6
def process_sample(sample_index, run_info, args, pssm):
    """ Process a single sample.

    :param sample_index: which sample to process from the session JSON
    :param run_info: run parameters loaded from the session JSON
    :param args: the command-line arguments
    :param pssm: the pssm library for running G2P analysis
    """
    scratch_path = os.path.join(args.data_path, 'scratch')
    sample_info = run_info.samples[sample_index]
    sample_id = sample_info['Id']
    sample_name = sample_info['Name']
    sample_dir = os.path.join(args.data_path, 'input', 'samples', sample_id,
                              'Data', 'Intensities', 'BaseCalls')
    if not os.path.exists(sample_dir):
        sample_dir = os.path.join(args.data_path, 'input', 'samples',
                                  sample_id)
    sample_path = None
    for root, _dirs, files in os.walk(sample_dir):
        sample_paths = fnmatch.filter(files, '*_R1_*')
        if sample_paths:
            sample_path = os.path.join(root, sample_paths[0])
            break
    if sample_path is None:
        raise RuntimeError(
            'No R1 file found for sample id {}.'.format(sample_id))
    sample_path2 = sample_path.replace('_R1_', '_R2_')
    if not os.path.exists(sample_path2):
        raise RuntimeError('R2 file missing for sample id {}: {!r}.'.format(
            sample_id, sample_path2))
    logger.info('Processing sample %s (%d of %d): %s (%s).', sample_id,
                sample_index + 1, len(run_info.samples), sample_name,
                sample_path)

    sample_qc_path = os.path.join(args.qc_path, sample_name)
    makedirs(sample_qc_path)
    sample_scratch_path = os.path.join(scratch_path, sample_name)
    makedirs(sample_scratch_path)

    bad_cycles_path = os.path.join(scratch_path, 'bad_cycles.csv')
    trimmed_path1 = os.path.join(sample_scratch_path, 'trimmed1.fastq')
    read_summary_path = os.path.join(sample_scratch_path, 'read_summary.csv')
    trimmed_path2 = os.path.join(sample_scratch_path, 'trimmed2.fastq')
    with open(read_summary_path, 'w') as read_summary:
        trim((sample_path, sample_path2),
             bad_cycles_path, (trimmed_path1, trimmed_path2),
             summary_file=read_summary,
             use_gzip=sample_path.endswith('.gz'))

    logger.info('Running fastq_g2p (%d of %d).', sample_index + 1,
                len(run_info.samples))
    g2p_unmapped1_path = os.path.join(sample_scratch_path,
                                      'g2p_unmapped1.fastq')
    g2p_unmapped2_path = os.path.join(sample_scratch_path,
                                      'g2p_unmapped2.fastq')
    with open(os.path.join(sample_scratch_path, 'trimmed1.fastq'), 'r') as fastq1, \
            open(os.path.join(sample_scratch_path, 'trimmed2.fastq'), 'r') as fastq2, \
            open(os.path.join(sample_scratch_path, 'g2p.csv'), 'w') as g2p_csv, \
            open(os.path.join(sample_scratch_path, 'g2p_summary.csv'), 'w') as g2p_summary_csv, \
            open(g2p_unmapped1_path, 'w') as g2p_unmapped1, \
            open(g2p_unmapped2_path, 'w') as g2p_unmapped2, \
            open(os.path.join(sample_scratch_path, 'g2p_aligned.csv'), 'w') as g2p_aligned_csv:

        fastq_g2p(pssm=pssm,
                  fastq1=fastq1,
                  fastq2=fastq2,
                  g2p_csv=g2p_csv,
                  g2p_summary_csv=g2p_summary_csv,
                  unmapped1=g2p_unmapped1,
                  unmapped2=g2p_unmapped2,
                  aligned_csv=g2p_aligned_csv,
                  min_count=DEFAULT_MIN_COUNT,
                  min_valid=MIN_VALID,
                  min_valid_percent=MIN_VALID_PERCENT)

    logger.info('Running prelim_map (%d of %d).', sample_index + 1,
                len(run_info.samples))
    excluded_seeds = [] if args.all_projects else EXCLUDED_SEEDS
    with open(os.path.join(sample_scratch_path, 'prelim.csv'),
              'w') as prelim_csv:
        prelim_map(g2p_unmapped1_path,
                   g2p_unmapped2_path,
                   prelim_csv,
                   work_path=sample_scratch_path,
                   excluded_seeds=excluded_seeds)

    logger.info('Running remap (%d of %d).', sample_index + 1,
                len(run_info.samples))
    if args.debug_remap:
        debug_file_prefix = os.path.join(sample_scratch_path, 'debug')
    else:
        debug_file_prefix = None
    with open(os.path.join(sample_scratch_path, 'prelim.csv'), 'r') as prelim_csv, \
            open(os.path.join(sample_scratch_path, 'remap.csv'), 'w') as remap_csv, \
            open(os.path.join(sample_scratch_path, 'remap_counts.csv'), 'w') as counts_csv, \
            open(os.path.join(sample_scratch_path, 'remap_conseq.csv'), 'w') as conseq_csv, \
            open(os.path.join(sample_qc_path, 'unmapped1.fastq'), 'w') as unmapped1, \
            open(os.path.join(sample_qc_path, 'unmapped2.fastq'), 'w') as unmapped2:

        remap(g2p_unmapped1_path,
              g2p_unmapped2_path,
              prelim_csv,
              remap_csv,
              counts_csv,
              conseq_csv,
              unmapped1,
              unmapped2,
              sample_scratch_path,
              debug_file_prefix=debug_file_prefix)

    logger.info('Running sam2aln (%d of %d).', sample_index + 1,
                len(run_info.samples))
    with open(os.path.join(sample_scratch_path, 'remap.csv'), 'r') as remap_csv, \
            open(os.path.join(sample_scratch_path, 'aligned.csv'), 'w') as aligned_csv, \
            open(os.path.join(sample_scratch_path, 'conseq_ins.csv'), 'w') as conseq_ins_csv, \
            open(os.path.join(sample_scratch_path, 'failed_read.csv'), 'w') as failed_csv, \
            open(os.path.join(sample_scratch_path, 'clipping.csv'), 'w') as clipping_csv:

        sam2aln(remap_csv,
                aligned_csv,
                conseq_ins_csv,
                failed_csv,
                clipping_csv=clipping_csv)

    logger.info('Running aln2counts (%d of %d).', sample_index + 1,
                len(run_info.samples))
    with open(os.path.join(sample_scratch_path, 'aligned.csv'), 'r') as aligned_csv, \
            open(os.path.join(sample_scratch_path, 'g2p_aligned.csv'), 'r') as g2p_aligned_csv, \
            open(os.path.join(sample_scratch_path, 'clipping.csv'), 'r') as clipping_csv, \
            open(os.path.join(sample_scratch_path, 'conseq_ins.csv'), 'r') as conseq_ins_csv, \
            open(os.path.join(sample_scratch_path, 'remap_conseq.csv'), 'r') as remap_conseq_csv, \
            open(os.path.join(sample_scratch_path, 'nuc.csv'), 'w') as nuc_csv, \
            open(os.path.join(sample_scratch_path, 'amino.csv'), 'w') as amino_csv, \
            open(os.path.join(sample_scratch_path, 'coord_ins.csv'), 'w') as coord_ins_csv, \
            open(os.path.join(sample_scratch_path, 'conseq.csv'), 'w') as conseq_csv, \
            open(os.path.join(sample_scratch_path, 'failed_align.csv'), 'w') as failed_align_csv, \
            open(os.path.join(sample_scratch_path, 'coverage_summary.csv'), 'w') as coverage_summary_csv:

        aln2counts(aligned_csv,
                   nuc_csv,
                   amino_csv,
                   coord_ins_csv,
                   conseq_csv,
                   failed_align_csv,
                   coverage_summary_csv=coverage_summary_csv,
                   clipping_csv=clipping_csv,
                   conseq_ins_csv=conseq_ins_csv,
                   g2p_aligned_csv=g2p_aligned_csv,
                   remap_conseq_csv=remap_conseq_csv)

    logger.info('Running coverage_plots (%d of %d).', sample_index + 1,
                len(run_info.samples))
    coverage_maps_path = os.path.join(args.qc_path, 'coverage_maps')
    makedirs(coverage_maps_path)
    excluded_projects = [] if args.all_projects else EXCLUDED_PROJECTS
    with open(os.path.join(sample_scratch_path, 'amino.csv'), 'r') as amino_csv, \
            open(os.path.join(sample_scratch_path, 'coverage_scores.csv'), 'w') as coverage_scores_csv:
        coverage_plot(amino_csv,
                      coverage_scores_csv,
                      coverage_maps_path=coverage_maps_path,
                      coverage_maps_prefix=sample_name,
                      excluded_projects=excluded_projects)

    logger.info('Running hivdb (%d of %d).', sample_index + 1,
                len(run_info.samples))
    with open(os.path.join(sample_scratch_path, 'amino.csv')) as amino_csv, \
            open(os.path.join(sample_scratch_path, 'coverage_scores.csv')) as coverage_scores_csv, \
            open(os.path.join(sample_scratch_path, 'resistance.csv'), 'w') as resistance_csv, \
            open(os.path.join(sample_scratch_path, 'mutations.csv'), 'w') as mutations_csv:
        hivdb(amino_csv, coverage_scores_csv, resistance_csv, mutations_csv,
              run_info.reports)

    logger.info('Running resistance report (%d of %d).', sample_index + 1,
                len(run_info.samples))
    source_path = os.path.dirname(__file__)
    version_filename = os.path.join(source_path, 'version.txt')
    if not os.path.exists(version_filename):
        git_version = 'v0-dev'
    else:
        with open(version_filename) as version_file:
            git_version = version_file.read().strip()
    reports_path = os.path.join(args.qc_path, 'resistance_reports')
    makedirs(reports_path)
    report_filename = os.path.join(reports_path,
                                   sample_name + '_resistance.pdf')
    with open(os.path.join(sample_scratch_path, 'resistance.csv')) as resistance_csv, \
            open(os.path.join(sample_scratch_path, 'mutations.csv')) as mutations_csv, \
            open(report_filename, 'wb') as report_pdf:
        gen_report(resistance_csv,
                   mutations_csv,
                   report_pdf,
                   sample_name,
                   git_version=git_version)

    logger.info('Running cascade_report (%d of %d).', sample_index + 1,
                len(run_info.samples))
    with open(os.path.join(sample_scratch_path, 'g2p_summary.csv'), 'r') as g2p_summary_csv, \
            open(os.path.join(sample_scratch_path, 'remap_counts.csv'), 'r') as remap_counts_csv, \
            open(os.path.join(sample_scratch_path, 'aligned.csv'), 'r') as aligned_csv, \
            open(os.path.join(sample_scratch_path, 'cascade.csv'), 'w') as cascade_csv:
        cascade_report = CascadeReport(cascade_csv)
        cascade_report.g2p_summary_csv = g2p_summary_csv
        cascade_report.remap_counts_csv = remap_counts_csv
        cascade_report.aligned_csv = aligned_csv
        cascade_report.generate()
    logger.info('Finished sample (%d of %d).', sample_index + 1,
                len(run_info.samples))
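A minimal driver sketch for this function, assuming run_info, args, and pssm have already been loaded as described in the docstring:

# Process every sample listed in the session JSON, in order.
for sample_index in range(len(run_info.samples)):
    process_sample(sample_index, run_info, args, pssm)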