def combine_polished_isoforms(split_indices, split_hq_fns, split_lq_fns,
                              combined_hq_fa, combined_hq_fq,
                              combined_lq_fa, combined_lq_fq,
                              hq_lq_prefix_dict_pickle, sample_name):
    """Combine split hq (lq) files and save to combined_dir.
    Dump the hq|lq prefix dictionary to a pickle.
    Return an instance of CombinedFiles.

    Parameters:
      split_indices -- indices of split cluster bins.
      split_hq_fns -- hq files, e.g., ['*/all_quivered_hq.100_30_0.99.fastq', ...]
      split_lq_fns -- lq files, e.g., ['all_quivered_lq.fastq', ...]
    """
    assert len(split_indices) == len(split_hq_fns)
    assert len(split_indices) == len(split_lq_fns)
    assert all([f.endswith(".fastq") for f in split_hq_fns + split_lq_fns])

    hq_pre_dict, lq_pre_dict = {}, {}

    hq_fa_writer = FastaWriter(combined_hq_fa)
    hq_fq_writer = FastqWriter(combined_hq_fq)
    lq_fa_writer = FastaWriter(combined_lq_fa)
    lq_fq_writer = FastqWriter(combined_lq_fq)

    for i, split_hq, split_lq in zip(split_indices, split_hq_fns, split_lq_fns):
        logging.debug("Adding prefix i%s_| to %s, %s", str(i), split_hq, split_lq)
        hq_prefix = combined_prefix(cluster_bin_index=i, isoform_type="HQ",
                                    sample_name=sample_name)
        lq_prefix = combined_prefix(cluster_bin_index=i, isoform_type="LQ",
                                    sample_name=sample_name)

        hq_pre_dict[hq_prefix] = op.dirname(op.abspath(split_hq))
        lq_pre_dict[lq_prefix] = op.dirname(op.abspath(split_lq))

        with FastqReader(split_hq) as reader:
            for read in reader:
                name = combined_cid_hq_name(cluster_bin_index=i,
                                            name=read.name, sample_name=sample_name)
                hq_fa_writer.writeRecord(name, read.sequence[:])
                hq_fq_writer.writeRecord(name, read.sequence[:], read.quality)

        with FastqReader(split_lq) as reader:
            for read in reader:
                name = combined_cid_lq_name(cluster_bin_index=i,
                                            name=read.name, sample_name=sample_name)
                lq_fa_writer.writeRecord(name, read.sequence[:])
                lq_fq_writer.writeRecord(name, read.sequence[:], read.quality)

    hq_fa_writer.close()
    hq_fq_writer.close()
    lq_fa_writer.close()
    lq_fq_writer.close()

    logging.info("HQ polished output combined to:%s", combined_hq_fq)
    logging.info("LQ polished output combined to:%s", combined_lq_fq)

    logging.info("Dumping hq|lq prefix dictionary to:%s", hq_lq_prefix_dict_pickle)
    with open(hq_lq_prefix_dict_pickle, 'wb') as writer:
        cPickle.dump({'HQ': hq_pre_dict, 'LQ': lq_pre_dict}, writer)
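# A minimal sketch (not part of the original module) of how a downstream step
# might load the HQ|LQ prefix dictionary that combine_polished_isoforms pickles
# above. The dictionary layout follows the function body; cPickle is the
# Python-2 module, so plain pickle is used as a fallback. The helper name is
# hypothetical.
try:
    import cPickle as pickle  # Python 2
except ImportError:
    import pickle              # Python 3


def load_hq_lq_prefix_dict(hq_lq_prefix_dict_pickle):
    """Return {'HQ': {prefix: dir}, 'LQ': {prefix: dir}} from the pickle."""
    with open(hq_lq_prefix_dict_pickle, 'rb') as reader:
        d = pickle.load(reader)
    assert set(d.keys()) == {'HQ', 'LQ'}
    return d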
def test_contigset_write(self):
    fasta = upstreamData.getLambdaFasta()
    ds = ContigSet(fasta)
    assert isinstance(ds.resourceReaders()[0], IndexedFastaReader)
    outdir = tempfile.mkdtemp(suffix="dataset-unittest")
    outfn = os.path.join(outdir, 'test.fasta')
    w = FastaWriter(outfn)
    for rec in ds:
        w.writeRecord(rec)
    w.close()
    fas = FastaReader(outfn)
    for rec in fas:
        # make sure a __repr__ didn't slip through:
        assert not rec.sequence.startswith('<')
def test_contigset_write(self):
    fasta = upstreamData.getLambdaFasta()
    ds = ContigSet(fasta)
    self.assertTrue(isinstance(ds.resourceReaders()[0], IndexedFastaReader))
    outdir = tempfile.mkdtemp(suffix="dataset-unittest")
    outfn = os.path.join(outdir, 'test.fasta')
    w = FastaWriter(outfn)
    for rec in ds:
        w.writeRecord(rec)
    w.close()
    fas = FastaReader(outfn)
    for rec in fas:
        # make sure a __repr__ didn't slip through:
        self.assertFalse(rec.sequence.startswith('<'))
def split(self):
    """Split `input_fasta` into smaller files, each containing
    `reads_per_split` reads. Return the list of split fasta files."""
    split_index = 0
    self.out_fns = []
    writer = FastaWriter(self._out_fn(split_index))
    self.out_fns.append(self._out_fn(split_index))
    with FastaReader(self.input_fasta) as reader:
        for ridx, r in enumerate(reader):
            if ridx % self.reads_per_split == 0 and ridx != 0:
                split_index += 1
                writer.close()
                writer = FastaWriter(self._out_fn(split_index))
                self.out_fns.append(self._out_fn(split_index))
            writer.writeRecord(r.name, r.sequence)
    writer.close()
    return list(self.out_fns)
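# A small, self-contained sketch of the chunking rule used by split() above: a
# new output file is opened whenever ridx % reads_per_split == 0 (except at
# ridx == 0). Plain index ranges stand in for FASTA records; the helper name is
# illustrative only.
def chunk_indices(n_reads, reads_per_split):
    """Return the (start, end) read-index range each split file would receive."""
    boundaries = []
    start = 0
    for ridx in range(n_reads):
        if ridx % reads_per_split == 0 and ridx != 0:
            boundaries.append((start, ridx))
            start = ridx
    boundaries.append((start, n_reads))
    return boundaries


assert chunk_indices(10, 4) == [(0, 4), (4, 8), (8, 10)]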
def save(self, dir):
    """
    Save this ArrowEvidence to a directory. The directory will be
    *created* by this method.

    Format of evidence dump:
    evidence_dump/
      ref000001/
        0-1005/
          consensus.fa
          arrow-scores.h5
        995-2005/
        ...
    """
    logging.info("Dumping evidence to %s" % (dir,))
    join = os.path.join
    if os.path.exists(dir):
        raise Exception(
            "Evidence dump does not expect directory %s to exist." % dir)
    os.makedirs(dir)
    #refFasta   = FastaWriter(join(dir, "reference.fa"))
    #readsFasta = FastaWriter(join(dir, "reads.fa"))
    consensusFasta = FastaWriter(join(dir, "consensus.fa"))
    windowName = self.refName + (":%d-%d" % (self.refStart, self.refEnd))
    #refFasta.writeRecord(windowName, self.refSequence)
    #refFasta.close()
    consensusFasta.writeRecord(windowName + "|arrow", self.consensus)
    consensusFasta.close()

    import h5py
    arrowScoreFile = h5py.File(join(dir, "arrow-scores.h5"))
    arrowScoreFile.create_dataset("Scores", data=self.scores)
    vlen_str = h5py.special_dtype(vlen=str)
    arrowScoreFile.create_dataset("RowNames", data=self.rowNames, dtype=vlen_str)
    arrowScoreFile.create_dataset("ColumnNames", data=self.colNames, dtype=vlen_str)
    arrowScoreFile.create_dataset("BaselineScores", data=self.baselineScores)
    arrowScoreFile.close()
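# A hedged sketch of reading back the evidence written by save() above, using
# only the dataset names the function creates. The directory argument is
# hypothetical and assumes save() has already produced "arrow-scores.h5".
def load_arrow_scores(evidence_dir):
    import os
    import h5py
    with h5py.File(os.path.join(evidence_dir, "arrow-scores.h5"), "r") as f:
        return (f["RowNames"][:], f["ColumnNames"][:],
                f["BaselineScores"][:], f["Scores"][:])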
def main(parser):
    args = parser.parse_args()

    # Get outfile name
    if args.outFile is None:
        outfile = 'nobarcode.fasta' if args.fasta else 'nobarcode.fastq'
    else:
        outfile = args.outFile

    # Input files
    barcodeFofn = (l.strip('\n') for l in args.barcode_fofn)
    ccsFofn = (l.strip('\n') for l in args.ccs_fofn)

    # Get the read names that are not barcoded
    no_barcode = set()
    for barcodeFile in barcodeFofn:
        bcH5 = BarcodeH5Reader(barcodeFile)
        for row in bcH5.bestDS:
            if row[3] / row[1] < args.minAvgBarcodeScore:
                no_barcode.add('%s/%d' % (bcH5.movieName, row[0]))

    if args.fasta:
        outh = FastaWriter(outfile)
    else:
        outh = FastqWriter(outfile)

    for ccsFile in ccsFofn:
        ccsH5 = BasH5Reader(ccsFile)
        for ccsRead in ccsH5.ccsReads():
            if ccsRead.zmw.zmwName in no_barcode:
                basecalls = ccsRead.basecalls()
                if len(basecalls) >= args.minMaxInsertLength:
                    if args.fasta:
                        outh.writeRecord(FastaRecord(ccsRead.zmw.zmwName,
                                                     ccsRead.basecalls()))
                    else:
                        outh.writeRecord(FastqRecord(ccsRead.zmw.zmwName,
                                                     ccsRead.basecalls(),
                                                     ccsRead.QualityValue()))
    outh.close()
def combine_consensus_isoforms(split_indices, split_files,
                               combined_consensus_isoforms_fa, sample_name):
    """
    Parameters:
      split_indices -- indices of split cluster bins.
      split_files -- consensus isoforms in each split cluster bin.
    """
    assert len(split_indices) == len(split_files)
    writer = FastaWriter(combined_consensus_isoforms_fa)
    for i, split_fn in zip(split_indices, split_files):
        logging.debug("Adding prefix i%s to %s.", str(i), split_fn)
        with ContigSetReaderWrapper(split_fn) as reader:
            for read in reader:
                name = combined_cid_ice_name(name=read.name,
                                             cluster_bin_index=i,
                                             sample_name=sample_name)
                writer.writeRecord(name, read.sequence[:])
    writer.close()
    logging.info("Consensus isoforms output combined to:%s",
                 combined_consensus_isoforms_fa)
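# A pure-stdlib stand-in illustrating the combine-and-prefix pattern used by
# combine_consensus_isoforms above. The exact record naming comes from
# combined_cid_ice_name, which is not shown here, so the "i{bin}_" prefix below
# is only an assumption for illustration.
def combine_fasta_with_prefix(split_indices, split_files, out_fa):
    with open(out_fa, "w") as out:
        for i, fn in zip(split_indices, split_files):
            with open(fn) as f:
                for line in f:
                    if line.startswith(">"):
                        # assumed prefix format; the real one is produced by
                        # combined_cid_ice_name
                        out.write(">i%d_%s" % (i, line[1:].lstrip()))
                    else:
                        out.write(line)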
def main(parser):
    args = parser.parse_args()

    # Get outfile name
    if args.outFile is None:
        outfile = 'nobarcode.fasta' if args.fasta else 'nobarcode.fastq'
    else:
        outfile = args.outFile

    # Input files
    barcodeFofn = (l.strip('\n') for l in args.barcode_fofn)
    baxFofn = (l.strip('\n') for l in args.bax_fofn)

    # Get the read names that are not barcoded
    no_barcode = defaultdict(set)
    for barcodeFile in barcodeFofn:
        bcH5 = BarcodeH5Reader(barcodeFile)
        for row in bcH5.bestDS:
            if row[3] / row[1] < args.minAvgBarcodeScore:
                no_barcode[bcH5.movieName].add(row[0])

    if args.fasta:
        outh = FastaWriter(outfile)
    else:
        outh = FastqWriter(outfile)

    for baxFile in baxFofn:
        baxH5 = BasH5Reader(baxFile)
        for holeNum in baxH5.sequencingZmws:
            if holeNum in no_barcode[baxH5.movieName]:
                zmw = baxH5[holeNum]
                if len(zmw.subreads) and max(len(sr.basecalls())
                                             for sr in zmw.subreads) >= args.minMaxInsertLength:
                    for subread in zmw.subreads:
                        if len(subread.basecalls()) >= args.minSubreadLength:
                            if args.fasta:
                                outh.writeRecord(FastaRecord(subread.readName,
                                                             subread.basecalls()))
                            else:
                                outh.writeRecord(FastqRecord(subread.readName,
                                                             subread.basecalls(),
                                                             subread.QualityValue()))
    outh.close()
def split(self, reads_in_first_split=None):
    """Split `input_fasta` into smaller files, each containing
    `reads_per_split` reads. Return the list of split fasta files."""
    split_index = 0
    self.out_fns = []
    writer = FastaWriter(self._out_fn(split_index))
    self.out_fns.append(self._out_fn(split_index))
    if reads_in_first_split is None:
        reads_in_first_split = self.reads_per_split
    with ContigSetReaderWrapper(self.input_fasta) as reader:
        for ridx, r in enumerate(reader):
            if ((split_index == 0 and ridx == reads_in_first_split) or
                    (split_index > 0 and ridx % self.reads_per_split == 0)) \
                    and ridx != 0:
                split_index += 1
                writer.close()
                writer = FastaWriter(self._out_fn(split_index))
                self.out_fns.append(self._out_fn(split_index))
            writer.writeRecord(r.name, r.sequence[:])
    writer.close()
    return list(self.out_fns)
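# Sketch of how reads_in_first_split changes the boundaries computed by the
# split() variant above: the first bin closes at reads_in_first_split, while
# later bins still close at multiples of reads_per_split (so the second bin
# can hold fewer than reads_per_split reads). Purely illustrative, stdlib only.
def split_points(n_reads, reads_per_split, reads_in_first_split=None):
    if reads_in_first_split is None:
        reads_in_first_split = reads_per_split
    points, split_index = [], 0
    for ridx in range(1, n_reads):
        if ((split_index == 0 and ridx == reads_in_first_split) or
                (split_index > 0 and ridx % reads_per_split == 0)):
            points.append(ridx)
            split_index += 1
    return points


assert split_points(25, 10) == [10, 20]
assert split_points(25, 10, reads_in_first_split=3) == [3, 10, 20]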
def save(self, dir):
    """
    Save this ArrowEvidence to a directory. The directory will be
    *created* by this method.

    Format of evidence dump:
    evidence_dump/
      ref000001/
        0-1005/
          consensus.fa
          arrow-scores.h5
        995-2005/
        ...
    """
    logging.info("Dumping evidence to %s" % (dir,))
    join = os.path.join
    if os.path.exists(dir):
        raise Exception(
            "Evidence dump does not expect directory %s to exist." % dir)
    os.makedirs(dir)
    #refFasta   = FastaWriter(join(dir, "reference.fa"))
    #readsFasta = FastaWriter(join(dir, "reads.fa"))
    consensusFasta = FastaWriter(join(dir, "consensus.fa"))
    windowName = self.refName + (":%d-%d" % (self.refStart, self.refEnd))
    #refFasta.writeRecord(windowName, self.refSequence)
    #refFasta.close()
    consensusFasta.writeRecord(windowName + "|arrow", self.consensus)
    consensusFasta.close()

    arrowScoreFile = h5py.File(join(dir, "arrow-scores.h5"))
    arrowScoreFile.create_dataset("Scores", data=self.scores)
    vlen_str = h5py.special_dtype(vlen=str)
    arrowScoreFile.create_dataset("RowNames", data=self.rowNames, dtype=vlen_str)
    arrowScoreFile.create_dataset("ColumnNames", data=self.colNames, dtype=vlen_str)
    arrowScoreFile.create_dataset("BaselineScores", data=self.baselineScores)
    arrowScoreFile.close()
def dumpEvidence(evidenceDumpBaseDirectory, refWindow, refSequence, alns,
                 quiverConsensus):
    """This will import h5py at runtime."""
    # Format of evidence dump:
    # evidence_dump/
    #   ref000001/
    #     0-1005/
    #       reference.fa
    #       reads.fa
    #       consensus.fa
    #       quiver-scores.h5
    #     995-2005/
    #     ...
    join = os.path.join
    refId, refStart, refEnd = refWindow
    refName = reference.idToName(refId)
    windowDirectory = join(evidenceDumpBaseDirectory,
                           refName,
                           "%d-%d" % (refStart, refEnd))
    logging.info("Dumping evidence to %s" % (windowDirectory,))

    if os.path.exists(windowDirectory):
        raise Exception(
            "Evidence dump does not expect directory %s to exist." % windowDirectory)
    os.makedirs(windowDirectory)
    refFasta = FastaWriter(join(windowDirectory, "reference.fa"))
    readsFasta = FastaWriter(join(windowDirectory, "reads.fa"))
    consensusFasta = FastaWriter(join(windowDirectory, "consensus.fa"))
    windowName = refName + (":%d-%d" % (refStart, refEnd))

    refFasta.writeRecord(windowName, refSequence)
    refFasta.close()

    consensusFasta.writeRecord(windowName + "|quiver", quiverConsensus.sequence)
    consensusFasta.close()

    rowNames, columnNames, baselineScores, scores = scoreMatrix(quiverConsensus.mms)

    import h5py
    quiverScoreFile = h5py.File(join(windowDirectory, "quiver-scores.h5"))
    quiverScoreFile.create_dataset("Scores", data=scores)
    vlen_str = h5py.special_dtype(vlen=str)
    quiverScoreFile.create_dataset("RowNames", data=rowNames, dtype=vlen_str)
    quiverScoreFile.create_dataset("ColumnNames", data=columnNames, dtype=vlen_str)
    quiverScoreFile.create_dataset("BaselineScores", data=baselineScores)
    quiverScoreFile.close()

    for aln in alns:
        readsFasta.writeRecord(str(aln.rowNumber),
                               aln.read(orientation="genomic", aligned=False))
    readsFasta.close()
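# A quick stdlib sketch of the directory and window naming used by
# dumpEvidence above: the per-window directory is "<refName>/<start>-<end>"
# and records are named "<refName>:<start>-<end>". The helper name and the
# example refName are illustrative only.
import os.path as op


def evidence_window_paths(base_dir, ref_name, ref_start, ref_end):
    window_dir = op.join(base_dir, ref_name, "%d-%d" % (ref_start, ref_end))
    window_name = ref_name + (":%d-%d" % (ref_start, ref_end))
    return window_dir, window_name

# e.g. evidence_window_paths("evidence_dump", "ref000001", 0, 1005)
#   -> ("evidence_dump/ref000001/0-1005", "ref000001:0-1005") on POSIX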
def dumpEvidence(evidenceDumpBaseDirectory, refWindow, refSequence, alns,
                 quiverConsensus):
    # Format of evidence dump:
    # evidence_dump/
    #   ref000001/
    #     0-1005/
    #       reference.fa
    #       reads.fa
    #       consensus.fa
    #       quiver-scores.h5
    #     995-2005/
    #     ...
    join = os.path.join
    refId, refStart, refEnd = refWindow
    refName = reference.idToName(refId)
    windowDirectory = join(evidenceDumpBaseDirectory,
                           refName,
                           "%d-%d" % (refStart, refEnd))
    logging.info("Dumping evidence to %s" % (windowDirectory,))

    if os.path.exists(windowDirectory):
        raise Exception(
            "Evidence dump does not expect directory %s to exist." % windowDirectory)
    os.makedirs(windowDirectory)
    refFasta = FastaWriter(join(windowDirectory, "reference.fa"))
    readsFasta = FastaWriter(join(windowDirectory, "reads.fa"))
    consensusFasta = FastaWriter(join(windowDirectory, "consensus.fa"))
    windowName = refName + (":%d-%d" % (refStart, refEnd))

    refFasta.writeRecord(windowName, refSequence)
    refFasta.close()

    consensusFasta.writeRecord(windowName + "|quiver", quiverConsensus.sequence)
    consensusFasta.close()

    rowNames, columnNames, baselineScores, scores = scoreMatrix(quiverConsensus.mms)

    quiverScoreFile = h5py.File(join(windowDirectory, "quiver-scores.h5"))
    quiverScoreFile.create_dataset("Scores", data=scores)
    vlen_str = h5py.special_dtype(vlen=str)
    quiverScoreFile.create_dataset("RowNames", data=rowNames, dtype=vlen_str)
    quiverScoreFile.create_dataset("ColumnNames", data=columnNames, dtype=vlen_str)
    quiverScoreFile.create_dataset("BaselineScores", data=baselineScores)
    quiverScoreFile.close()

    for aln in alns:
        readsFasta.writeRecord(str(aln.rowNumber),
                               aln.read(orientation="genomic", aligned=False))
    readsFasta.close()
def pick_rep(isoform_filename, gff_filename, group_filename, output_filename,
             pick_least_err_instead=False, bad_gff_filename=None):
    """
    For each group of collapsed sam records, select the representative record.

    If the input is a FASTA file -- always pick the longest sequence.
    If the input is a FASTQ file --
        if pick_least_err_instead is True, pick the one with the smallest
        expected number of base errors; else, pick the longest one.
    """
    fd = None
    is_fq = False
    dummy_prefix, _suffix = parse_ds_filename(isoform_filename)
    if _suffix == "fasta":
        fd = FastaRandomReader(isoform_filename)
    elif _suffix == "fastq":
        fd = FastqRandomReader(isoform_filename)
        is_fq = True
    elif _suffix == "contigset.xml":
        fd = ContigSet(isoform_filename)
        _fns = fd.toExternalFiles()
        if len(_fns) == 1 and (_fns[0].endswith(".fq") or _fns[0].endswith(".fastq")):
            fd = FastqRandomReader(_fns[0])
            is_fq = True
        else:
            if not fd.isIndexed:
                # Must be indexed FASTA, or contain exactly one FASTQ file
                raise IOError("%s must contain either indexed FASTA files or "
                              "exactly one FASTQ file!" % isoform_filename)
    else:
        raise IOError("Unable to recognize file type of %s." % isoform_filename)

    fa_out_fn, fq_out_fn, ds_out_fn = None, None, None

    _prefix, _suffix = parse_ds_filename(output_filename)
    if _suffix == "fasta":
        fa_out_fn = output_filename
    elif _suffix == "fastq":
        if not is_fq:
            raise ValueError("Input file %s is not FASTQ while output is." % isoform_filename)
        else:
            fq_out_fn = output_filename
    elif _suffix == "contigset.xml":  # output is contigset.xml
        ds_out_fn = output_filename
        fa_out_fn = _prefix + ".fasta"
        if is_fq:
            fq_out_fn = _prefix + ".fastq"
    else:
        raise IOError("Unable to recognize file type of %s." % output_filename)

    fa_writer = FastaWriter(fa_out_fn) if fa_out_fn is not None else None
    fq_writer = FastqWriter(fq_out_fn) if fq_out_fn is not None else None

    coords = {}
    for r in CollapseGffReader(gff_filename):
        tid = r.transcript_id
        coords[tid] = "{0}:{1}-{2}({3})".format(r.seqid, r.start, r.end, r.strand)

    if bad_gff_filename is not None:
        for r in CollapseGffReader(bad_gff_filename):
            tid = r.transcript_id
            coords[tid] = "{0}:{1}-{2}({3})".format(r.seqid, r.start, r.end, r.strand)

    for group in GroupReader(group_filename):
        pb_id, members = group.name, group.members
        if pb_id not in coords:
            raise ValueError("Could not find %s in %s and %s" %
                             (pb_id, gff_filename, bad_gff_filename))
        #logging.info("Picking representative sequence for %s", pb_id)
        best_id = None
        best_seq = None
        best_qual = None
        best_err = 9999999
        err = 9999999
        max_len = 0
        for x in members:
            if is_fq and pick_least_err_instead:
                # expected number of base errors from Phred quality values
                err = sum(10**-(i / 10.) for i in fd[x].quality)
            if (is_fq and pick_least_err_instead and err < best_err) or \
               ((not is_fq or not pick_least_err_instead) and len(fd[x].sequence) >= max_len):
                best_id = x
                best_seq = fd[x].sequence
                if is_fq:
                    best_qual = fd[x].quality
                    best_err = err
                max_len = len(fd[x].sequence)

        _id_ = "{0}|{1}|{2}".format(pb_id, coords[pb_id], best_id)
        _seq_ = best_seq
        if fq_writer is not None:
            fq_writer.writeRecord(_id_, _seq_, best_qual)
        if fa_writer is not None:
            fa_writer.writeRecord(_id_, _seq_)

    if fa_writer is not None:
        fa_writer.close()
    if fq_writer is not None:
        fq_writer.close()
    if ds_out_fn is not None:
        as_contigset(fa_out_fn, ds_out_fn)
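# A self-contained sketch of the "least expected errors" criterion pick_rep
# uses when pick_least_err_instead is True: each Phred quality Q contributes an
# error probability of 10 ** (-Q / 10), and the member minimizing the sum wins.
# The helper name is illustrative only.
def expected_errors(quals):
    """Expected number of base errors for a list of Phred quality values."""
    return sum(10 ** -(q / 10.) for q in quals)


# e.g. thirty bases at Q30 -> ~0.03 expected errors
assert abs(expected_errors([30] * 30) - 0.03) < 1e-9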
class ResultCollector(object):
    """
    Gathers results and writes to a file.
    """

    def __init__(self, resultsQueue, algorithmName, algorithmConfig):
        self._resultsQueue = resultsQueue
        self._algorithmName = algorithmName
        self._algorithmConfig = algorithmConfig

    def _run(self):
        self.onStart()

        sentinelsReceived = 0
        while sentinelsReceived < options.numWorkers:
            result = self._resultsQueue.get()
            if result is None:
                sentinelsReceived += 1
            else:
                self.onResult(result)

        self.onFinish()

    def run(self):
        if options.doProfiling:
            cProfile.runctx("self._run()",
                            globals=globals(),
                            locals=locals(),
                            filename=os.path.join(options.temporaryDirectory,
                                                  "profile-%s.out" % (self.name)))
        else:
            self._run()

    # ==================================
    # Overridable interface begins here.
    #

    def onStart(self):
        self.referenceBasesProcessedById = OrderedDict()
        for refId in reference.byName:
            self.referenceBasesProcessedById[refId] = 0
        self.variantsByRefId = defaultdict(list)
        self.consensusChunksByRefId = defaultdict(list)

        # open file writers
        self.fastaWriter = None
        self.fastqWriter = None
        self.gffWriter = None
        self.vcfWriter = None
        if options.fastaOutputFilename:
            self.fastaWriter = FastaWriter(options.fastaOutputFilename)
        if options.fastqOutputFilename:
            self.fastqWriter = FastqWriter(options.fastqOutputFilename)
        if options.gffOutputFilename:
            self.gffWriter = VariantsGffWriter(options.gffOutputFilename,
                                               vars(options),
                                               reference.byName.values())
        if options.vcfOutputFilename:
            self.vcfWriter = VariantsVcfWriter(options.vcfOutputFilename,
                                               vars(options),
                                               reference.byName.values())

    def onResult(self, result):
        window, cssAndVariants = result
        css, variants = cssAndVariants
        self._recordNewResults(window, css, variants)
        self._flushContigIfCompleted(window)

    def onFinish(self):
        logging.info("Analysis completed.")
        if self.fastaWriter:
            self.fastaWriter.close()
        if self.fastqWriter:
            self.fastqWriter.close()
        if self.gffWriter:
            self.gffWriter.close()
        if self.vcfWriter:
            self.vcfWriter.close()
        logging.info("Output files completed.")

    def _recordNewResults(self, window, css, variants):
        refId, refStart, refEnd = window
        self.consensusChunksByRefId[refId].append(css)
        self.variantsByRefId[refId] += variants
        self.referenceBasesProcessedById[refId] += (refEnd - refStart)

    def _flushContigIfCompleted(self, window):
        refId, _, _ = window
        refEntry = reference.byName[refId]
        refName = refEntry.fullName
        basesProcessed = self.referenceBasesProcessedById[refId]
        requiredBases = reference.numReferenceBases(refId,
                                                    options.referenceWindows)
        if basesProcessed == requiredBases:
            # This contig is done, so we can dump to file and delete
            # the data structures.
            if self.gffWriter or self.vcfWriter:
                variants = sorted(self.variantsByRefId[refId])
                if self.gffWriter:
                    self.gffWriter.writeVariants(variants)
                if self.vcfWriter:
                    self.vcfWriter.writeVariants(variants)
            del self.variantsByRefId[refId]

            #
            # If the user asked to analyze a window or a set of
            # windows, we output a FAST[AQ] contig per analyzed
            # window.  Otherwise we output a fasta contig per
            # reference contig.
            #
            # We try to be intelligent about naming the output
            # contigs, to include window information where applicable.
            #
            for span in reference.enumerateSpans(refId, options.referenceWindows):
                _, s, e = span
                if (s == 0) and (e == refEntry.length):
                    spanName = refName
                else:
                    spanName = refName + "_%d_%d" % (s, e)
                cssName = consensus.consensusContigName(spanName,
                                                        self._algorithmName)
                # Gather just the chunks pertaining to this span
                chunksThisSpan = [chunk for chunk in self.consensusChunksByRefId[refId]
                                  if windows.windowsIntersect(chunk.refWindow, span)]
                css = consensus.join(chunksThisSpan)

                if self.fastaWriter:
                    self.fastaWriter.writeRecord(cssName, css.sequence)
                if self.fastqWriter:
                    self.fastqWriter.writeRecord(cssName, css.sequence,
                                                 css.confidence)

            del self.consensusChunksByRefId[refId]
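# A minimal, runnable sketch of the sentinel protocol ResultCollector._run()
# relies on: each worker puts results on the queue and finally a single None,
# and the collector exits once it has seen one None per worker. Thread-based
# stand-in, stdlib only; the names below are illustrative, not from
# GenomicConsensus.
try:
    import queue             # Python 3
except ImportError:
    import Queue as queue    # Python 2
import threading


def _worker(q, items):
    for item in items:
        q.put(item)
    q.put(None)  # sentinel: this worker is done


def collect(num_workers, work):
    q = queue.Queue()
    threads = [threading.Thread(target=_worker, args=(q, chunk)) for chunk in work]
    for t in threads:
        t.start()
    results, sentinels = [], 0
    while sentinels < num_workers:
        r = q.get()
        if r is None:
            sentinels += 1
        else:
            results.append(r)
    for t in threads:
        t.join()
    return results


assert sorted(collect(2, [[1, 2], [3]])) == [1, 2, 3]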
class ResultCollector(object):
    """
    Gathers results and writes to a file.
    """

    def __init__(self, resultsQueue, algorithmName, algorithmConfig):
        self._resultsQueue = resultsQueue
        self._algorithmName = algorithmName
        self._algorithmConfig = algorithmConfig

    def _run(self):
        self.onStart()

        sentinelsReceived = 0
        while sentinelsReceived < options.numWorkers:
            result = self._resultsQueue.get()
            if result is None:
                sentinelsReceived += 1
            else:
                self.onResult(result)

        self.onFinish()

    def run(self):
        if options.doProfiling:
            cProfile.runctx("self._run()",
                            globals=globals(),
                            locals=locals(),
                            filename=os.path.join(options.temporaryDirectory,
                                                  "profile-%s.out" % (self.name)))
        else:
            self._run()

    # ==================================
    # Overridable interface begins here.
    #

    def onStart(self):
        self.referenceBasesProcessedById = OrderedDict()
        for refId in reference.byName:
            self.referenceBasesProcessedById[refId] = 0
        self.variantsByRefId = defaultdict(list)
        self.consensusChunksByRefId = defaultdict(list)

        # open file writers
        self.fastaWriter = self.fastqWriter = self.gffWriter = None
        if options.fastaOutputFilename:
            self.fastaWriter = FastaWriter(options.fastaOutputFilename)
        if options.fastqOutputFilename:
            self.fastqWriter = FastqWriter(options.fastqOutputFilename)
        if options.gffOutputFilename:
            self.gffWriter = VariantsGffWriter(options.gffOutputFilename,
                                               vars(options),
                                               reference.byName.values())

    def onResult(self, result):
        window, cssAndVariants = result
        css, variants = cssAndVariants
        self._recordNewResults(window, css, variants)
        self._flushContigIfCompleted(window)

    def onFinish(self):
        logging.info("Analysis completed.")
        if self.fastaWriter:
            self.fastaWriter.close()
        if self.fastqWriter:
            self.fastqWriter.close()
        if self.gffWriter:
            self.gffWriter.close()
        logging.info("Output files completed.")

    def _recordNewResults(self, window, css, variants):
        refId, refStart, refEnd = window
        self.consensusChunksByRefId[refId].append(css)
        self.variantsByRefId[refId] += variants
        self.referenceBasesProcessedById[refId] += (refEnd - refStart)

    def _flushContigIfCompleted(self, window):
        refId, _, _ = window
        refEntry = reference.byName[refId]
        refName = refEntry.fullName
        basesProcessed = self.referenceBasesProcessedById[refId]
        requiredBases = reference.numReferenceBases(refId,
                                                    options.referenceWindows)
        if basesProcessed == requiredBases:
            # This contig is done, so we can dump to file and delete
            # the data structures.
            if self.gffWriter:
                self.gffWriter.writeVariants(sorted(self.variantsByRefId[refId]))
            del self.variantsByRefId[refId]

            #
            # If the user asked to analyze a window or a set of
            # windows, we output a FAST[AQ] contig per analyzed
            # window.  Otherwise we output a fasta contig per
            # reference contig.
            #
            # We try to be intelligent about naming the output
            # contigs, to include window information where applicable.
            #
            for span in reference.enumerateSpans(refId, options.referenceWindows):
                _, s, e = span
                if (s == 0) and (e == refEntry.length):
                    spanName = refName
                else:
                    spanName = refName + "_%d_%d" % (s, e)
                cssName = consensus.consensusContigName(spanName, self._algorithmName)
                # Gather just the chunks pertaining to this span
                chunksThisSpan = [chunk for chunk in self.consensusChunksByRefId[refId]
                                  if windows.windowsIntersect(chunk.refWindow, span)]
                css = consensus.join(chunksThisSpan)

                if self.fastaWriter:
                    self.fastaWriter.writeRecord(cssName, css.sequence)
                if self.fastqWriter:
                    self.fastqWriter.writeRecord(cssName, css.sequence,
                                                 css.confidence)

            del self.consensusChunksByRefId[refId]