def merge_mapping_pathways(self):
    ''' Writes the trimmed full-length mappings and the extended remappings
        into a single coordinate-sorted BAM file, then records the combined
        per-pathway length counts under the 'merged_mapping' key.
    '''
    clean_bam = pysam.Samfile(self.file_names['clean_bam'])
    sorter = sam.AlignmentSorter(clean_bam.references,
                                 clean_bam.lengths,
                                 self.file_names['merged_mappings'],
                                 )
    with sorter:
        # Both pathways feed the same sorter so the output is one
        # coordinate-sorted file.
        for pathway in (self.process_full_length_mappings(),
                        self.process_remapped(),
                        ):
            for mapping in pathway:
                sorter.write(mapping)

    length_counts = self.read_file('lengths')
    combined_counts = length_counts['clean_trimmed'] + length_counts['remapped']
    self.write_file('lengths', {'merged_mapping': combined_counts})
def extend_polyA_ends(bam_fn, extended_bam_fn, genome_dir, trimmed_twice=False):
    ''' Extends the poly(A) end of every mapping in bam_fn and writes the
        results to extended_bam_fn, re-sorted by coordinate.
    '''
    mappings = pysam.Samfile(bam_fn)
    fetch_region = genomes.build_region_fetcher(genome_dir,
                                                load_references=True,
                                                sam_file=mappings,
                                                )
    # Adding bases to the end of minus strand mappings produces a file
    # that is not necessarily sorted, so re-sort.
    sorter = sam.AlignmentSorter(mappings.references,
                                 mappings.lengths,
                                 extended_bam_fn,
                                 )
    with sorter:
        for mapping in mappings:
            extended = extend_polyA_end(mapping, fetch_region, trimmed_twice)
            sorter.write(extended)
def combine_mappings(self):
    ''' Merges the accepted-hits and unmapped BAM files by read name,
        classifies each read as unmapped, nonunique (multiple mappings or
        any mapping with MAPQ < 40), or unique, writes the mappings of
        unique reads to a coordinate-sorted BAM file, and records the
        counts in self.summary.
    '''
    num_unmapped = 0
    num_nonunique = 0
    num_unique = 0

    mappings = pysam.Samfile(self.file_names['accepted_hits'])
    unmapped = pysam.Samfile(self.file_names['unmapped_bam'])
    merged = sam.merge_by_name(mappings, unmapped)
    grouped = utilities.group_by(merged, lambda m: m.qname)

    alignment_sorter = sam.AlignmentSorter(mappings.references,
                                           mappings.lengths,
                                           self.file_names['bam'],
                                           )
    with alignment_sorter:
        for qname, group in grouped:
            # Fix: this flag was previously also named 'unmapped', which
            # rebound (shadowed) the unmapped Samfile handle opened above.
            any_unmapped = any(m.is_unmapped for m in group)
            if any_unmapped:
                num_unmapped += 1
                continue

            nonunique = len(group) > 1 or any(m.mapq < 40 for m in group)
            if nonunique:
                num_nonunique += 1
            else:
                num_unique += 1
                for mapping in group:
                    alignment_sorter.write(mapping)

    self.summary.extend(
        [('Unmapped', num_unmapped),
         ('Nonunique', num_nonunique),
         ('Unique', num_unique),
        ],
    )
def filter_mappings(self):
    ''' Extends the poly(A) end of every mapping, writes all extended
        mappings to one coordinate-sorted BAM file, then filters for reads
        that mapped uniquely (single mapping with MAPQ >= 40) with at least
        one non-genomic A, writing those to a second BAM file. Records
        classification counts in self.summary and the distribution of
        minimum non-genomic lengths to the 'nongenomic_lengths' file.
    '''
    # Fix: removed 'num_unmapped', which was initialized but never
    # incremented or reported.
    num_entirely_genomic = 0
    num_nonunique = 0
    num_unique = 0

    nongenomic_lengths = Counter()

    sam_file = pysam.Samfile(self.file_names['accepted_hits'])
    region_fetcher = genomes.build_region_fetcher(self.file_names['genome'],
                                                  load_references=True,
                                                  sam_file=sam_file,
                                                  )
    extended_sorter = sam.AlignmentSorter(sam_file.references,
                                          sam_file.lengths,
                                          self.file_names['extended'],
                                          )
    filtered_sorter = sam.AlignmentSorter(sam_file.references,
                                          sam_file.lengths,
                                          self.file_names['extended_filtered'],
                                          )
    extended_mappings = (trim.extend_polyA_end(mapping, region_fetcher)
                         for mapping in sam_file)
    mapping_groups = utilities.group_by(extended_mappings, lambda m: m.qname)

    with extended_sorter, filtered_sorter:
        for qname, group in mapping_groups:
            # Every extended mapping is written, regardless of filtering.
            for m in group:
                extended_sorter.write(m)

            # Use the minimum across the group so a read only counts as
            # having non-genomic A's if every mapping agrees.
            min_nongenomic_length = min(trim.get_nongenomic_length(m)
                                        for m in group)
            nongenomic_lengths[min_nongenomic_length] += 1
            if min_nongenomic_length == 0:
                num_entirely_genomic += 1
                continue

            nonunique = len(group) > 1 or any(m.mapq < 40 for m in group)
            if nonunique:
                num_nonunique += 1
                continue

            num_unique += 1
            for m in group:
                filtered_sorter.write(m)

    self.summary.extend(
        [('Mapped with no non-genomic A\'s', num_entirely_genomic),
         ('Nonunique', num_nonunique),
         ('Unique', num_unique),
        ],
    )

    nongenomic_lengths = utilities.counts_to_array(nongenomic_lengths)
    self.write_file('nongenomic_lengths', nongenomic_lengths)
def align_reads(
    target_fasta_fn,
    reads,
    bam_fn,
    min_path_length=15,
    error_fn='/dev/null',
    alignment_type='overlap',
):
    ''' Aligns reads to targets in target_fasta_fn by Smith-Waterman,
    storing alignments in bam_fn and yielding unaligned reads.

    Each read is aligned in both orientations against every target; an
    alignment is kept if its path is at least min_path_length long and its
    score exceeds 0.8 of the maximum possible (2 per aligned position).
    Per-read counts ('input', 'aligned', 'unaligned') are written to
    error_fn.
    '''
    targets = {r.name: r.seq for r in fasta.reads(target_fasta_fn)}
    target_names = sorted(targets)
    target_lengths = [len(targets[n]) for n in target_names]
    alignment_sorter = sam.AlignmentSorter(
        target_names,
        target_lengths,
        bam_fn,
    )
    statistics = Counter()

    with alignment_sorter:
        for original_read in reads:
            statistics['input'] += 1

            alignments = []

            # Also try the reverse complement, with qualities reversed to match.
            rc_read = fastq.Read(
                original_read.name,
                utilities.reverse_complement(original_read.seq),
                original_read.qual[::-1],
            )
            for read, is_reverse in ([original_read, False], [rc_read, True]):
                qual = fastq.decode_sanger(read.qual)
                for target_name, target_seq in targets.iteritems():
                    alignment = generate_alignments(read.seq,
                                                    target_seq,
                                                    alignment_type,
                                                    )[0]
                    path = alignment['path']
                    # Accept only sufficiently long, high-scoring paths;
                    # 2. * len(path) appears to be the maximum attainable score.
                    if len(path) >= min_path_length and alignment['score'] / (2. * len(path)) > 0.8:
                        aligned_segment = pysam.AlignedSegment()
                        aligned_segment.seq = read.seq
                        aligned_segment.query_qualities = qual
                        aligned_segment.is_reverse = is_reverse

                        char_pairs = make_char_pairs(path, read.seq, target_seq)

                        cigar = sam.aligned_pairs_to_cigar(char_pairs)
                        # Soft-clip any unaligned prefix/suffix of the read.
                        clip_from_start = first_query_index(path)
                        if clip_from_start > 0:
                            cigar = [(sam.BAM_CSOFT_CLIP, clip_from_start)] + cigar
                        clip_from_end = len(read.seq) - 1 - last_query_index(path)
                        if clip_from_end > 0:
                            cigar = cigar + [(sam.BAM_CSOFT_CLIP, clip_from_end)]
                        aligned_segment.cigar = cigar

                        read_aligned, ref_aligned = zip(*char_pairs)
                        md = sam.alignment_to_MD_string(ref_aligned, read_aligned)
                        aligned_segment.set_tag('MD', md)

                        aligned_segment.set_tag('AS', alignment['score'])
                        aligned_segment.tid = alignment_sorter.get_tid(target_name)
                        aligned_segment.query_name = read.name
                        aligned_segment.next_reference_id = -1
                        aligned_segment.reference_start = first_target_index(path)

                        alignments.append(aligned_segment)

            if alignments:
                statistics['aligned'] += 1

                # Keep only the top-scoring group; MAPQ 2 marks a single best
                # alignment, MAPQ 1 marks ties. The first of a tied group is
                # primary, the rest are flagged secondary.
                sorted_alignments = sorted(alignments,
                                           key=lambda m: m.get_tag('AS'),
                                           reverse=True,
                                           )
                grouped = utilities.group_by(sorted_alignments,
                                             key=lambda m: m.get_tag('AS'),
                                             )
                _, highest_group = grouped.next()
                primary_already_assigned = False
                for alignment in highest_group:
                    if len(highest_group) == 1:
                        alignment.mapping_quality = 2
                    else:
                        alignment.mapping_quality = 1

                    if not primary_already_assigned:
                        primary_already_assigned = True
                    else:
                        alignment.is_secondary = True

                    alignment_sorter.write(alignment)
            else:
                statistics['unaligned'] += 1

                # NOTE(review): 'read' here is the last value bound by the
                # orientation loop above (the reverse complement), not
                # original_read — confirm this is intended.
                yield read

    with open(error_fn, 'w') as error_fh:
        for key in ['input', 'aligned', 'unaligned']:
            error_fh.write('{0}: {1:,}\n'.format(key, statistics[key]))
def combine_mappings(self):
    ''' Pairs up the 5' and 3' mappings of each read by name, classifies
    each pair (unmapped, nonunique, discordant, or concordant), splices
    concordant pairs into a single gapped read, extends its poly(A) end,
    and writes the result to a coordinate-sorted BAM file. Classification
    counts are appended to self.summary.
    '''
    num_unmapped = 0
    num_five_unmapped = 0
    num_three_unmapped = 0
    num_nonunique = 0
    num_discordant = 0
    num_concordant = 0

    five_prime_mappings = pysam.Samfile(self.file_names['five_prime_accepted_hits'])
    five_prime_unmapped = pysam.Samfile(self.file_names['five_prime_unmapped'])
    all_five_prime = sam.merge_by_name(five_prime_mappings, five_prime_unmapped)
    five_prime_grouped = utilities.group_by(all_five_prime, lambda m: m.qname)

    three_prime_mappings = pysam.Samfile(self.file_names['three_prime_accepted_hits'])
    three_prime_unmapped = pysam.Samfile(self.file_names['three_prime_unmapped'])
    all_three_prime = sam.merge_by_name(three_prime_mappings, three_prime_unmapped)
    three_prime_grouped = utilities.group_by(all_three_prime, lambda m: m.qname)

    # The two grouped streams are consumed in lockstep; the name check
    # below guards against them drifting out of sync.
    group_pairs = izip(five_prime_grouped, three_prime_grouped)

    alignment_sorter = sam.AlignmentSorter(five_prime_mappings.references,
                                           five_prime_mappings.lengths,
                                           self.file_names['combined_extended'],
                                           )
    region_fetcher = genomes.build_region_fetcher(self.file_names['genome'],
                                                  load_references=True,
                                                  sam_file=five_prime_mappings,
                                                  )

    with alignment_sorter:
        for (five_qname, five_group), (three_qname, three_group) in group_pairs:
            five_annotation = trim.PayloadAnnotation.from_identifier(five_qname)
            three_annotation = trim.PayloadAnnotation.from_identifier(three_qname)
            if five_annotation['original_name'] != three_annotation['original_name']:
                # Ensure that the iteration through pairs is in sync.
                print five_qname, three_qname
                raise ValueError

            five_unmapped = any(m.is_unmapped for m in five_group)
            three_unmapped = any(m.is_unmapped for m in three_group)
            if five_unmapped:
                num_five_unmapped += 1
            if three_unmapped:
                num_three_unmapped += 1
            if five_unmapped or three_unmapped:
                num_unmapped += 1
                continue

            # Nonunique: multiple mappings, or any mapping with MAPQ < 40.
            five_nonunique = len(five_group) > 1 or any(m.mapq < 40 for m in five_group)
            three_nonunique = len(three_group) > 1 or any(m.mapq < 40 for m in three_group)
            if five_nonunique or three_nonunique:
                num_nonunique += 1
                continue

            # Each group now holds exactly one mapping.
            five_m = five_group.pop()
            three_m = three_group.pop()

            five_strand = '-' if five_m.is_reverse else '+'
            three_strand = '-' if three_m.is_reverse else '+'

            # Discordant: different references, different strands, or an
            # overall span longer than 10 kb.
            tlen = max(five_m.aend, three_m.aend) - min(five_m.pos, three_m.pos)
            discordant = (five_m.tid != three_m.tid) or (five_strand) != (three_strand) or (tlen > 10000)
            if discordant:
                num_discordant += 1
                continue

            # Order the two halves by genomic position so they can be
            # spliced left-to-right.
            if five_strand == '+':
                first_read = five_m
                second_read = three_m
            elif five_strand == '-':
                first_read = three_m
                second_read = five_m

            # A negative gap means the halves overlap; treat as discordant.
            gap = second_read.pos - first_read.aend
            if gap < 0:
                num_discordant += 1
                continue

            combined_read = pysam.AlignedRead()
            # qname needs to come from three_m to include trimmed As
            combined_read.qname = three_m.qname
            combined_read.tid = five_m.tid
            combined_read.seq = first_read.seq + second_read.seq
            combined_read.qual = first_read.qual + second_read.qual
            # CIGAR op 3 is a reference skip (N) covering the gap between
            # the two halves.
            combined_read.cigar = first_read.cigar + [(3, gap)] + second_read.cigar
            combined_read.pos = first_read.pos
            combined_read.is_reverse = first_read.is_reverse
            combined_read.mapq = min(first_read.mapq, second_read.mapq)
            combined_read.rnext = -1
            combined_read.pnext = -1

            num_concordant += 1

            extended_mapping = trim.extend_polyA_end(combined_read,
                                                     region_fetcher,
                                                     )
            alignment_sorter.write(extended_mapping)

    self.summary.extend(
        [('Unmapped', num_unmapped),
         ('Five prime unmapped', num_five_unmapped),
         ('Three prime unmapped', num_three_unmapped),
         ('Nonunique', num_nonunique),
         ('Discordant', num_discordant),
         ('Concordant', num_concordant),
        ],
    )
def combine_mappings(self):
    ''' Pairs up the R1 and R2 mappings of each read pair by name,
    classifies each pair (unmapped, nonunique, discordant, disoriented, or
    concordant), combines concordant pairs into a single mapping flipped
    back to the sense strand, and writes the results to a coordinate-sorted
    BAM file. Counts are appended to self.summary and the template-length
    distribution is written to the 'tlens' file.
    '''
    num_unmapped = 0
    num_R1_unmapped = 0
    num_R2_unmapped = 0
    num_nonunique = 0
    num_discordant = 0
    num_disoriented = 0
    num_concordant = 0

    tlens = Counter()

    R1_mappings = pysam.Samfile(self.file_names['R1_accepted_hits'])
    R1_unmapped = pysam.Samfile(self.file_names['R1_unmapped'])
    all_R1 = sam.merge_by_name(R1_mappings, R1_unmapped)
    R1_grouped = utilities.group_by(all_R1, lambda m: m.qname)

    R2_mappings = pysam.Samfile(self.file_names['R2_accepted_hits'])
    R2_unmapped = pysam.Samfile(self.file_names['R2_unmapped'])
    all_R2 = sam.merge_by_name(R2_mappings, R2_unmapped)
    R2_grouped = utilities.group_by(all_R2, lambda m: m.qname)

    # Consumed in lockstep; the pair-name check below guards against the
    # two streams drifting out of sync.
    group_pairs = izip(R1_grouped, R2_grouped)

    alignment_sorter = sam.AlignmentSorter(R1_mappings.references,
                                           R1_mappings.lengths,
                                           self.file_names['combined'],
                                           )

    with alignment_sorter:
        for (R1_qname, R1_group), (R2_qname, R2_group) in group_pairs:
            #print R1_qname, R2_qname
            if fastq.get_pair_name(R1_qname) != fastq.get_pair_name(R2_qname):
                # Ensure that the iteration through pairs is in sync.
                print R1_qname, R2_qname
                raise ValueError

            # NOTE(review): these booleans rebind the R1_unmapped/R2_unmapped
            # names that held the unmapped Samfile handles above; the merged
            # iterators already hold references, but the shadowing is fragile.
            R1_unmapped = any(m.is_unmapped for m in R1_group)
            R2_unmapped = any(m.is_unmapped for m in R2_group)
            if R1_unmapped:
                num_R1_unmapped += 1
            if R2_unmapped:
                num_R2_unmapped += 1
            if R1_unmapped or R2_unmapped:
                num_unmapped += 1
                continue

            # Nonunique: multiple mappings, or any mapping with MAPQ < 40.
            R1_nonunique = len(R1_group) > 1 or any(m.mapq < 40 for m in R1_group)
            R2_nonunique = len(R2_group) > 1 or any(m.mapq < 40 for m in R2_group)
            if R1_nonunique or R2_nonunique:
                num_nonunique += 1
                continue

            # Each group now holds exactly one mapping.
            R1_m = R1_group.pop()
            R2_m = R2_group.pop()

            R1_strand = sam.get_strand(R1_m)
            R2_strand = sam.get_strand(R2_m)

            # Discordant: different references, SAME strand (mates must be on
            # opposite strands), or an overall span longer than 10 kb.
            tlen = max(R1_m.aend, R2_m.aend) - min(R1_m.pos, R2_m.pos)
            discordant = (R1_m.tid != R2_m.tid) or (R1_strand) == (R2_strand) or (tlen > 10000)
            if discordant:
                num_discordant += 1
                continue

            # Reminder: the protocol produces anti-sense reads.
            if R1_strand == '-':
                if R1_m.pos < R2_m.pos:
                    num_disoriented += 1
                    continue
            elif R1_strand == '+':
                if R2_m.pos < R1_m.pos:
                    num_disoriented += 1
                    continue

            combined_read = paired_end.combine_paired_mappings(R1_m, R2_m)

            tlens[tlen] += 1

            if combined_read:
                # Flip combined_read back to the sense strand.
                if combined_read.is_reverse:
                    combined_read.is_reverse = False
                else:
                    combined_read.is_reverse = True

                trim.set_nongenomic_length(combined_read, 0)

                alignment_sorter.write(combined_read)

                num_concordant += 1

    self.summary.extend(
        [('Unmapped', num_unmapped),
         ('R1 unmapped', num_R1_unmapped),
         ('R2 unmapped', num_R2_unmapped),
         ('Nonunique', num_nonunique),
         ('Discordant', num_discordant),
         ('Unexpected orientation', num_disoriented),
         ('Concordant', num_concordant),
        ],
    )

    tlens = utilities.counts_to_array(tlens)
    self.write_file('tlens', tlens)
def post_filter(
    input_bam_fn,
    gff_fn,
    clean_bam_fn,
    more_rRNA_bam_fn,
    tRNA_bam_fn,
    other_ncRNA_bam_fn,
):
    ''' Removes any remaining mappings to rRNA transcripts and any mappings
    to tRNA or other noncoding RNA transcripts. If a read has any mapping
    to an rRNA transcript, write all such mappings to more_rRNA_bam_fn with
    exactly one flagged primary. If a read has any mapping to a tRNA
    transcript, write all such mappings to tRNA_bam_fn, with exactly one
    flagged primary only if there were no rRNA mappings. If a read has any
    mapping to any other noncoding RNA transcript, write all such mappings
    to other_ncRNA_bam_fn, with exactly one flagged only if there were no
    rRNA or tRNA mappings. Write all reads with no mappings to any
    noncoding RNA to clean_bam_fn.
    '''
    # qnames of every read seen mapping to any noncoding RNA; shared across
    # the three contaminant classes so primary flagging is global.
    contaminant_qnames = set()

    rRNA_transcripts, tRNA_transcripts, other_ncRNA_transcripts = gff.get_noncoding_RNA_transcripts(gff_fn)

    input_bam_file = pysam.Samfile(input_bam_fn)
    # Find reads with any mappings that overlap rRNA or tRNA transcripts and write any
    # such mappings to a contaminant bam file.
    # The classes are processed in priority order (rRNA, then tRNA, then
    # other), which determines which class gets the primary flag.
    for transcripts, bam_fn in [(rRNA_transcripts, more_rRNA_bam_fn),
                                (tRNA_transcripts, tRNA_bam_fn),
                                (other_ncRNA_transcripts, other_ncRNA_bam_fn),
                               ]:
        alignment_sorter = sam.AlignmentSorter(input_bam_file.references,
                                               input_bam_file.lengths,
                                               bam_fn,
                                              )
        with alignment_sorter:
            for transcript in transcripts:
                transcript.build_coordinate_maps()
                overlapping_mappings = input_bam_file.fetch(transcript.seqname,
                                                            transcript.start,
                                                            transcript.end,
                                                           )
                for mapping in overlapping_mappings:
                    # Confirm that there is at least one base from the read
                    # mapped to a position in the transcript (i.e. it isn't just
                    # a spliced read whose junction contains the transcript).
                    if any(p in transcript.genomic_to_transcript and
                           0 <= transcript.genomic_to_transcript[p] < transcript.transcript_length
                           for p in mapping.positions):
                        if mapping.qname not in contaminant_qnames:
                            # This is the first time seeing this qname, so flag
                            # it as primary.
                            mapping.is_secondary = False
                            contaminant_qnames.add(mapping.qname)
                        else:
                            # This qname has already been seen, so flag it as
                            # secondary.
                            mapping.is_secondary = True

                        alignment_sorter.write(mapping)

    input_bam_file.close()

    # Create a new clean bam file consisting of all mappings of each
    # read that wasn't flagged as a contaminant.
    input_bam_file = pysam.Samfile(input_bam_fn, 'rb')
    with pysam.Samfile(clean_bam_fn, 'wb', template=input_bam_file) as clean_bam_file:
        for mapping in input_bam_file:
            if mapping.qname not in contaminant_qnames:
                clean_bam_file.write(mapping)