def test_simple(self, config_mock, savefig_mock):
    config_mock.return_value = self.config
    amino_csv = StringIO("""\
seed,region,q-cutoff,query.aa.pos,refseq.aa.pos,A,C,D,E,F,G,H,I,K,L,M,N,P,Q,R,S,T,V,W,Y,*
R1-seed,R1,15,100,1,0,5,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
R1-seed,R1,15,101,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,5,0,0,0,0
R1-seed,R1,15,102,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,5,0,0,0,0,0,0
""")
    expected_scores = """\
project,region,seed,q.cut,min.coverage,which.key.pos,off.score,on.score
R1,R1,R1-seed,15,5,1,-1,1
R1-and-R2,R1,R1-seed,15,5,1,-1,1
"""
    scores_csv = StringIO()
    amino_csv.name = 'E1234.amino.csv'
    expected_calls = [call('E1234.R1.R1.png'),
                      call('E1234.R1-and-R2.R1.png')]

    coverage_plot(amino_csv,
                  coverage_scores_csv=scores_csv,
                  coverage_maps_prefix='E1234')

    self.assertEqual(expected_calls, savefig_mock.mock_calls)
    self.assertEqual(expected_scores, scores_csv.getvalue())
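# The config_mock and savefig_mock arguments imply the test is wrapped in
# stacked mock.patch decorators, which inject mocks bottom-up (the bottom
# decorator supplies the first mock argument). A minimal sketch of that
# wiring; both patch target strings are assumptions for illustration, not
# taken from the source:
#
#     from mock import patch  # unittest.mock on Python 3
#
#     @patch('matplotlib.pyplot.savefig')
#     @patch('micall.core.coverage_plots.ProjectConfig.loadDefault')
#     def test_simple(self, config_mock, savefig_mock):
#         ...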
def process_sample(sample_index, run_info, data_path, pssm):
    """ Process a single sample.

    :param sample_index: which sample to process from the session JSON
    :param run_info: run parameters loaded from the session JSON
    :param str data_path: the root folder for all BaseSpace data
    :param pssm: the pssm library for running G2P analysis
    """
    scratch_path = os.path.join(data_path, 'scratch')
    sample_info = run_info.samples[sample_index]
    sample_id = sample_info['Id']
    sample_name = sample_info['Name']
    sample_dir = os.path.join(data_path,
                              'input',
                              'samples',
                              sample_id,
                              'Data',
                              'Intensities',
                              'BaseCalls')
    if not os.path.exists(sample_dir):
        sample_dir = os.path.join(data_path, 'input', 'samples', sample_id)
    sample_path = None
    for root, _dirs, files in os.walk(sample_dir):
        sample_paths = fnmatch.filter(files, '*_R1_*')
        if sample_paths:
            sample_path = os.path.join(root, sample_paths[0])
            break
    if sample_path is None:
        raise RuntimeError(
            'No R1 file found for sample id {}.'.format(sample_id))
    sample_path2 = sample_path.replace('_R1_', '_R2_')
    if not os.path.exists(sample_path2):
        raise RuntimeError('R2 file missing for sample id {}: {!r}.'.format(
            sample_id,
            sample_path2))
    logger.info('Processing sample %s (%d of %d): %s (%s).',
                sample_id,
                sample_index + 1,
                len(run_info.samples),
                sample_name,
                sample_path)

    sample_out_path = create_app_result(data_path,
                                        run_info,
                                        sample_info,
                                        description='Mapping results',
                                        suffix='_QC')

    sample_scratch_path = os.path.join(scratch_path, sample_name)
    makedirs(sample_scratch_path)

    censored_path1 = os.path.join(sample_scratch_path, 'censored1.fastq')
    read_summary_path1 = os.path.join(sample_scratch_path, 'read1_summary.csv')
    censor_sample(sample_path,
                  os.path.join(scratch_path, 'bad_cycles.csv'),
                  censored_path1,
                  read_summary_path1)
    censored_path2 = os.path.join(sample_scratch_path, 'censored2.fastq')
    read_summary_path2 = os.path.join(sample_scratch_path, 'read2_summary.csv')
    censor_sample(sample_path2,
                  os.path.join(scratch_path, 'bad_cycles.csv'),
                  censored_path2,
                  read_summary_path2)

    logger.info('Running prelim_map (%d of %d).',
                sample_index + 1,
                len(run_info.samples))
    with open(os.path.join(sample_scratch_path, 'prelim.csv'), 'wb') as prelim_csv:
        prelim_map(censored_path1, censored_path2, prelim_csv)

    logger.info('Running remap (%d of %d).',
                sample_index + 1,
                len(run_info.samples))
    with open(os.path.join(sample_scratch_path, 'prelim.csv'), 'rU') as prelim_csv, \
         open(os.path.join(sample_scratch_path, 'remap.csv'), 'wb') as remap_csv, \
         open(os.path.join(sample_out_path, 'remap_counts.csv'), 'wb') as counts_csv, \
         open(os.path.join(sample_out_path, 'remap_conseq.csv'), 'wb') as conseq_csv, \
         open(os.path.join(sample_out_path, 'unmapped1.fastq'), 'w') as unmapped1, \
         open(os.path.join(sample_out_path, 'unmapped2.fastq'), 'w') as unmapped2:
        remap(censored_path1,
              censored_path2,
              prelim_csv,
              remap_csv,
              counts_csv,
              conseq_csv,
              unmapped1,
              unmapped2,
              sample_scratch_path,
              nthreads=1)

    logger.info('Running sam2aln (%d of %d).',
                sample_index + 1,
                len(run_info.samples))
    with open(os.path.join(sample_scratch_path, 'remap.csv'), 'rU') as remap_csv, \
         open(os.path.join(sample_scratch_path, 'aligned.csv'), 'wb') as aligned_csv, \
         open(os.path.join(sample_out_path, 'conseq_ins.csv'), 'wb') as insert_csv, \
         open(os.path.join(sample_out_path, 'failed_read.csv'), 'wb') as failed_csv:
        sam2aln(remap_csv, aligned_csv, insert_csv, failed_csv)

    logger.info('Running aln2counts (%d of %d).',
                sample_index + 1,
                len(run_info.samples))
    with open(os.path.join(sample_scratch_path, 'aligned.csv'), 'rU') as aligned_csv, \
         open(os.path.join(sample_out_path, 'nuc.csv'), 'wb') as nuc_csv, \
         open(os.path.join(sample_out_path, 'amino.csv'), 'wb') as amino_csv, \
         open(os.path.join(sample_out_path, 'coord_ins.csv'), 'wb') as coord_ins_csv, \
         open(os.path.join(sample_out_path, 'conseq.csv'), 'wb') as conseq_csv, \
         open(os.path.join(sample_out_path, 'failed_align.csv'), 'wb') as failed_align_csv, \
         open(os.path.join(sample_out_path, 'nuc_variants.csv'), 'wb') as nuc_variants_csv, \
         open(os.path.join(sample_scratch_path, 'coverage_summary.csv'), 'wb') as coverage_summary_csv:
        aln2counts(aligned_csv,
                   nuc_csv,
                   amino_csv,
                   coord_ins_csv,
                   conseq_csv,
                   failed_align_csv,
                   nuc_variants_csv,
                   coverage_summary_csv=coverage_summary_csv)

    logger.info('Running coverage_plots (%d of %d).',
                sample_index + 1,
                len(run_info.samples))
    coverage_path = os.path.join(sample_out_path, 'coverage')
    with open(os.path.join(sample_out_path, 'amino.csv'), 'rU') as amino_csv, \
         open(os.path.join(sample_out_path, 'coverage_scores.csv'), 'w') as coverage_scores_csv:
        coverage_plot(amino_csv, coverage_scores_csv, path_prefix=coverage_path)

    with open(os.path.join(sample_out_path, 'coverage_scores.csv'), 'rU') as coverage_scores_csv:
        reader = csv.DictReader(coverage_scores_csv)
        is_v3loop_good = False
        for row in reader:
            if row['region'] == 'V3LOOP':
                is_v3loop_good = row['on.score'] == '4'
                break

    if is_v3loop_good:
        logger.info('Running sam_g2p (%d of %d).',
                    sample_index + 1,
                    len(run_info.samples))
        g2p_path = create_app_result(data_path,
                                     run_info,
                                     sample_info,
                                     description='Geno To Pheno results',
                                     suffix='_G2P')
        with open(os.path.join(sample_scratch_path, 'remap.csv'), 'rU') as remap_csv, \
             open(os.path.join(sample_out_path, 'nuc.csv'), 'rU') as nuc_csv, \
             open(os.path.join(g2p_path, 'g2p.csv'), 'wb') as g2p_csv, \
             open(os.path.join(g2p_path, 'g2p_summary.csv'), 'wb') as g2p_summary_csv:
            sam_g2p(pssm=pssm,
                    remap_csv=remap_csv,
                    nuc_csv=nuc_csv,
                    g2p_csv=g2p_csv,
                    g2p_summary_csv=g2p_summary_csv,
                    min_count=DEFAULT_MIN_COUNT)
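# For context, a minimal driver sketch showing how process_sample above
# might be invoked for a whole run. The process_run name is a placeholder,
# not the app's actual entry point; run_info, data_path, and pssm are
# assumed to be loaded elsewhere.
def process_run(run_info, data_path, pssm):
    # Process every sample listed in the session JSON, in order.
    for sample_index in range(len(run_info.samples)):
        process_sample(sample_index, run_info, data_path, pssm)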
def process_sample(self, fastq1, progress, prefixes, image_paths, error_log):
    fastq2 = fastq1.replace('_R1_001', '_R2_001').replace('censored1',
                                                          'censored2')
    if not os.path.exists(fastq2):
        raise IOError('ERROR: Missing R2 file for {}'.format(fastq1))

    prefix = os.path.basename(fastq1).replace('_L001_R1_001.fastq',
                                              '').replace('.censored1.fastq',
                                                          '')
    prefixes.append(prefix)
    output_csv = prefix + '.prelim.csv'
    self.write('Processing sample {} ({})\n'.format(prefix, progress))
    with open(output_csv, 'wb') as handle:
        prelim_map(fastq1,
                   fastq2,
                   handle,
                   nthreads=self.nthreads,
                   callback=self.callback,
                   stderr=error_log)

    # prepare file handles for remap stage
    with open(output_csv, 'rU') as prelim_csv, \
         open(os.path.join(self.workdir, prefix + '.remap.csv'), 'wb') as remap_csv, \
         open(os.path.join(self.workdir, prefix + '.remap_counts.csv'), 'wb') as counts_csv, \
         open(os.path.join(self.workdir, prefix + '.remap_conseq.csv'), 'wb') as conseq_csv, \
         open(os.path.join(self.workdir, prefix + '.unmapped1.fastq'), 'w') as unmapped1, \
         open(os.path.join(self.workdir, prefix + '.unmapped2.fastq'), 'w') as unmapped2:
        self.write('... remapping\n')
        self.parent.update()
        self.progress_bar['value'] = 0
        remap(fastq1,
              fastq2,
              prelim_csv,
              remap_csv,
              counts_csv,
              conseq_csv,
              unmapped1,
              unmapped2,
              self.workdir,
              nthreads=self.nthreads,
              callback=self.callback,
              stderr=error_log)

    # prepare file handles for conversion from SAM format to alignment
    with open(os.path.join(self.workdir, prefix + '.remap.csv'), 'rU') as remap_csv, \
         open(os.path.join(self.workdir, prefix + '.aligned.csv'), 'wb') as aligned_csv, \
         open(os.path.join(self.workdir, prefix + '.insert.csv'), 'wb') as insert_csv, \
         open(os.path.join(self.workdir, prefix + '.failed.csv'), 'wb') as failed_csv:
        self.write('... converting into alignment\n')
        self.parent.update()
        sam2aln(remap_csv,
                aligned_csv,
                insert_csv,
                failed_csv,
                nthreads=self.nthreads)

    with open(os.path.join(self.workdir, prefix + '.aligned.csv'), 'rU') as aligned_csv, \
         open(os.path.join(self.workdir, prefix + '.nuc.csv'), 'wb') as nuc_csv, \
         open(os.path.join(self.workdir, prefix + '.amino.csv'), 'wb') as amino_csv, \
         open(os.path.join(self.workdir, prefix + '.coord_ins.csv'), 'wb') as coord_ins_csv, \
         open(os.path.join(self.workdir, prefix + '.conseq.csv'), 'wb') as conseq_csv, \
         open(os.path.join(self.workdir, prefix + '.failed_align.csv'), 'wb') as failed_align_csv, \
         open(os.path.join(self.workdir, prefix + '.nuc_variants.csv'), 'wb') as nuc_variants_csv:
        self.parent.update()
        aln2counts(aligned_csv,
                   nuc_csv,
                   amino_csv,
                   coord_ins_csv,
                   conseq_csv,
                   failed_align_csv,
                   nuc_variants_csv,
                   callback=self.callback)

    self.write('... generating coverage plots\n')
    self.parent.update()
    with open(os.path.join(self.workdir, prefix + '.amino.csv'), 'rU') as amino_csv:
        image_paths += coverage_plot(amino_csv)

    self.write('... performing g2p scoring on samples covering HIV-1 V3\n')
    self.parent.update()
    with open(os.path.join(self.workdir, prefix + '.remap.csv'), 'rU') as remap_csv, \
         open(os.path.join(self.workdir, prefix + '.nuc.csv'), 'rU') as nuc_csv, \
         open(os.path.join(self.workdir, prefix + '.g2p.csv'), 'wb') as g2p_csv:
        sam_g2p(pssm=self.pssm,
                remap_csv=remap_csv,
                nuc_csv=nuc_csv,
                g2p_csv=g2p_csv)
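# A sketch of a plausible caller for the method above, assuming the censored
# FASTQ files live in self.workdir. The method name and the file-discovery
# logic are illustrative assumptions, not the GUI's actual loop. Requires:
# from glob import glob
def process_all_samples(self, error_log):
    # Feed each censored R1 file through the pipeline, accumulating sample
    # prefixes and coverage-plot paths as it goes.
    fastq1_files = sorted(glob(os.path.join(self.workdir, '*.censored1.fastq')))
    prefixes = []
    image_paths = []
    for i, fastq1 in enumerate(fastq1_files, start=1):
        progress = '{} of {}'.format(i, len(fastq1_files))
        self.process_sample(fastq1, progress, prefixes, image_paths, error_log)
    return prefixes, image_paths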