Example #1
def main(fq1, fq2, output_prefix, abundance_filename):
    abundance = {}
    if abundance_filename is None:
        abundance = defaultdict(lambda: 1)
    else:
        with open(abundance_filename) as f:
            for line in f:
                _id, _count = line.strip().split('\t')
                abundance[_id] = int(_count)

    matchf = BowTieWriter(output_prefix + '.overlap.aligned')
    unf1 = FastqWriter(output_prefix + '.overlap.1.unaligned')
    unf2 = FastqWriter(output_prefix + '.overlap.2.unaligned')

    total = 0
    total_expanded = 0
    aligned = 0
    aligned_expanded = 0
    for r1, r2 in FastqReaderPaired(fq1, fq2):
        realid = r1['ID'][:r1['ID'].find('/')]
        total += 1
        total_expanded += abundance[realid]
        if find_overlap(r1, r2, matchf, unf1, unf2):  #overlap found
            aligned += 1
            aligned_expanded += abundance[realid]

    with open(output_prefix + '.overlap.log', 'w') as f:
        p = aligned * 100. / total
        f.write("# reads processed: {0}\n".format(total))
        f.write(
            "# reads with at least one reported alignment: {0} ({1:.2f}%)\n".
            format(aligned, p))
        f.write("# reads that failed to align: {0} ({1:.2f}%)\n".format(
            total - aligned, 100 - p))
        f.write("Reported {0} paired-end alignments to 1 output stream(s)\n".
                format(aligned))

    with open(output_prefix + '.overlap.log_expanded', 'w') as f:
        p = aligned_expanded * 100. / total_expanded
        f.write("# reads processed: {0}\n".format(total_expanded))
        f.write(
            "# reads with at least one reported alignment: {0} ({1:.2f}%)\n".
            format(aligned_expanded, p))
        f.write("# reads that failed to align: {0} ({1:.2f}%)\n".format(
            total_expanded - aligned_expanded, 100 - p))
        f.write("Reported {0} paired-end alignments to 1 output stream(s)\n".
                format(aligned_expanded))

    matchf.close()
    unf1.close()
    unf2.close()
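
The helper name and read IDs in the sketch below are placeholders; it only illustrates the abundance-weighting idea used in Example #1: each read pair contributes 1 to the raw totals and abundance[id] to the "expanded" totals, with a defaultdict supplying a weight of 1 when no abundance table is given.

from collections import defaultdict

def load_abundance(path=None):
    # Mirrors Example #1: no file means every read weighs 1; otherwise the file
    # is a two-column, tab-separated "<id><TAB><count>" table.
    if path is None:
        return defaultdict(lambda: 1)
    abundance = {}
    with open(path) as f:
        for line in f:
            _id, _count = line.strip().split('\t')
            abundance[_id] = int(_count)
    return abundance

# Toy usage (hypothetical IDs): read_2 stands for 5 collapsed duplicates.
abundance = load_abundance()
abundance['read_2'] = 5
total = total_expanded = 0
for read_id in ('read_1', 'read_2'):
    total += 1
    total_expanded += abundance[read_id]
print(total, total_expanded)   # 2 6
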
def remove_high_expected_error_PE(file1, file2, max_expected_error):
	"""
	Remove all reads where the expected error (sum of err probs from phred scores)
	exceeds <max_expected_error>
	"""
	assert os.path.exists(file1) and os.path.exists(file2)
	os.system("rm {0}.experror_*".format(file1))
	os.system("rm {0}.experror_*".format(file2))
	hgood1 = BowTieWriter(file1 + '.experror_good')
	hgood2 = BowTieWriter(file2 + '.experror_good')
	hbad1  = BowTieWriter(file1 + '.experror_bad')
	hbad2  = BowTieWriter(file2 + '.experror_bad')
	hlog = open(file1 + '.experror.log', 'w')
	start_t = time.time()
	good, bad = 0,0
	for r1, r2 in itertools.izip(BowTieReader(file1, False), BowTieReader(file2, False)):
		if sum(10**-((ord(x)-33)/10.) for x in r1['qual']) <= max_expected_error and \
		sum(10**-((ord(x)-33)/10.) for x in r2['qual']) <= max_expected_error:
			hgood1.write(r1)
			hgood2.write(r2)
			good += 1
		else:
			hbad1.write(r1)
			hbad2.write(r2)
			bad += 1
	hlog.write("Expected error filtering took {0} sec.\n".format(time.time()-start_t))
	hlog.write("Max allowed expected error: {0}\n".format(max_expected_error))
	hlog.write("# of original reads: {0}\n".format(good+bad))
	hlog.write("# of reads removed: {0} ({1:.2f})\n".format(bad,bad*1./(good+bad)))
	hlog.write("# of reads remaining: {0} ({1:.2f})\n".format(good,good*1./(good+bad)))

	hgood1.close()
	hgood2.close()
	hbad1.close()
	hbad2.close()
	hlog.close()
	os.system("gzip " + hgood1.f.name)
	os.system("gzip " + hgood2.f.name)
	os.system("gzip " + hbad1.f.name)
	os.system("gzip " + hbad2.f.name)
Example #4
	options, args = parser.parse_args()
	
	if options.mutate_freq is None:
		mutate_freq = {'A':defaultdict(lambda:1/3.),\
					'T':defaultdict(lambda:1/3.),\
					'C':defaultdict(lambda:1/3.),\
					'G':defaultdict(lambda:1/3.)}
	else:
		mutate_freq = load(open(options.mutate_freq))
		for k, v in mutate_freq.iteritems():
			assert sum(v.itervalues()) == 1.

	reader = BowTieReader(options.input, is_paired=True)
	print >> sys.stderr, "calculating base frequencies"
	base_freq_pickle = options.input + ".base_freq.pickle"
	if os.path.exists(base_freq_pickle):
		base_freq = load(open(base_freq_pickle))
	else:
		base_freq = reader.get_base_frequency()
		with open(options.input + ".base_freq.pickle", 'w') as f:
			dump(base_freq, f)
	print >> sys.stderr, "reading bowtie aligned file..."
	writer = BowTieWriter(options.output)
	for r1, r2 in reader:
		# remove /1 from r1 if there is one, same with /2 from r2
		while r1['ID'].endswith('/1') and r2['ID'].endswith('/2'):
			r1['ID'] = r1['ID'][:-2]
			r2['ID'] = r2['ID'][:-2]
		seq, qual, overlap = compose2(r1, r2, base_freq, mutate_freq)
		writer.write_composite(r1, r2, seq, qual, overlap)
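
Example #4 computes the per-position base frequencies once and caches them next to the input as a pickle, loading the cached copy on later runs. Below is a small, generic sketch of that compute-or-load pattern (cached and its arguments are illustrative, not part of the example's libraries):

import os
import pickle

def cached(cache_path, compute):
    # Load a previously pickled result if present; otherwise compute it and cache it.
    if os.path.exists(cache_path):
        with open(cache_path, 'rb') as f:
            return pickle.load(f)
    result = compute()
    with open(cache_path, 'wb') as f:
        pickle.dump(result, f)
    return result

# e.g. base_freq = cached(options.input + '.base_freq.pickle', reader.get_base_frequency)
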
Example #6
def detect_primers_PE(input1, input2, output_prefix, f_primer, r_primer, min_match_len, max_mm, max_de, max_in):
	"""
	NOTE: this is for paired-end reads that come in two separate files
	ex: DS19342_CTTGTA_L006_R1_001.fastq.gz and DS19342_CTTGTA_L006_R2_001.fastq.gz
	
	Given a pair of reads from input1, input2:
	1. Detect that F primer exists in one read and R primer in the other
	2. If both reads pass primer detection, output
	3. Otherwise, discard
	
	Output:  <output_prefix>.{F|R}primer_good
	         <output_prefix>.primer_bad.{1|2}
	         <output_prefix>.primer.verbose
	         <output_prefix>.primer.log
	"""
	def process_primer(r, match_len, is_reverse):
		# get record into miscBowTie.BowTieReader format 
		# strip away primers from seq & qual, properly rev comp!
		r['offset'] = match_len
		r['seq'] = r['seq'][match_len:]
		r['qual'] = r['qual'][match_len:]
		r['ref'] = 'NA'
		if is_reverse:
			r['seq'] = Seq(r['seq']).reverse_complement().tostring()
			r['qual'] = r['qual'][::-1]
	
	os.system("rm {0}.*primer_*".format(output_prefix))
	Fgood = BowTieWriter(output_prefix + '.Fprimer_good')
	Rgood = BowTieWriter(output_prefix + '.Rprimer_good')
	hbad1 = FastqWriter(output_prefix + '.primer_bad.1')
	hbad2 = FastqWriter(output_prefix + '.primer_bad.2')
	hverbose = open(output_prefix + '.primer.verbose', 'w')
	hlog = open(output_prefix + '.primer.log', 'w')
	start_t = time.time()
	good, bad = 0,0
	
	pmF = PrimerMatch(f_primer)
	pmR = PrimerMatch(r_primer)

	for r1, r2 in itertools.izip(FastqReader(input1), FastqReader(input2)):
		# NOTE: in the case of PE reads
		#       regardless of whether we're matching for F or R primer
		#       they would all appear at the 5' end of the read
		#       which is why we call match_primer_len with is_reverse = False
		match_f_len1, mmf1 = match_primer_len(r1['seq'], f_primer, max_mm, min_match_len, False)
		match_r_len1, mmr1 = match_primer_len(r1['seq'], r_primer, max_mm, min_match_len, False)
		match_f_len2, mmf2 = match_primer_len(r2['seq'], f_primer, max_mm, min_match_len, False)
		match_r_len2, mmr2 = match_primer_len(r2['seq'], r_primer, max_mm, min_match_len, False)
		#match_f_len1 = match_f_len2 =match_r_len1=match_r_len2=0
		if match_f_len1 > 0 and match_r_len2 > 0:
			# case 1, read 1 is F, read 2 is R
			good += 1
			process_primer(r1, match_f_len1, False)
			Fgood.write(r1)
			process_primer(r2, match_r_len2, False)
			Rgood.write(r2)
		elif match_f_len2 > 0 and match_r_len1 > 0:
			# case 2, read 1 is R, read 2 is F
			good += 1
			process_primer(r2, match_f_len2, False)
			Fgood.write(r2)
			process_primer(r1, match_r_len1, False)
			Rgood.write(r1)
		else:
			pmF.make_suffix(r1['seq'])
			pmF.match(min_match_len, max_mm, max_in, max_de)
			if pmF.match_result is not None: 
				pmR.make_suffix(r2['seq'])
				pmR.match(min_match_len, max_mm, max_in, max_de)
				if pmR.match_result is not None:  # case 1, read 1 is F, read 2 is R
					good += 1
					process_primer(r1, pmF.match_result.match_len, False)
					Fgood.write(r1)
					hverbose.write("{0}\t{1}\t{2}\n".format(r1['ID'], pmF.match_result.match_len, pmF.match_result.miss))
					process_primer(r2, pmR.match_result.match_len, False)
					Rgood.write(r2)
					hverbose.write("{0}\t{1}\t{2}\n".format(r2['ID'], pmR.match_result.match_len, pmR.match_result.miss))
				else:
					hbad1.write(r1)
					hbad2.write(r2)
					bad += 1
			else:
				pmR.make_suffix(r1['seq'])
				pmR.match(min_match_len, max_mm, max_in, max_de)
				if pmR.match_result is not None:
					pmF.make_suffix(r2['seq'])
					pmF.match(min_match_len, max_mm, max_in, max_de)
					if pmF.match_result is not None:
						good += 1
						# case 2, read 1 is R, read 2 is F
						process_primer(r2, pmF.match_result.match_len, False)
						hverbose.write("{0}\t{1}\t{2}\n".format(r2['ID'], pmF.match_result.match_len, pmF.match_result.miss))
						Fgood.write(r2)
						process_primer(r1, pmR.match_result.match_len, False)
						Rgood.write(r1)
						hverbose.write("{0}\t{1}\t{2}\n".format(r1['ID'], pmR.match_result.match_len, pmR.match_result.miss))
					else:
						# case 3: unresolved, bad read pair
						hbad1.write(r1)
						hbad2.write(r2)
						bad += 1
				else:
					# neither primer matched read 1: also an unresolved, bad read pair
					hbad1.write(r1)
					hbad2.write(r2)
					bad += 1

	hlog.write("Input 1: {0}\nInput 2: {1}\n".format(input1, input2))
	hlog.write("F primer: {0}\nR primer: {1}\n".format(f_primer, r_primer))
	hlog.write("Min match len: {0}\n".format(min_match_len))
	hlog.write("Max mismatch: {0}\n".format(max_mm))
	hlog.write("Max deletion: {0}\n".format(max_de))
	hlog.write("Max insertion: {0}\n".format(max_in))
	hlog.write("Primer detection and removal took {0} sec.\n".format(time.time()-start_t))
	hlog.write("# of original reads: {0}\n".format(good+bad))
	hlog.write("# of reads removed: {0} ({1:.2f})\n".format(bad,bad*1./(good+bad)))
	hlog.write("# of reads remaining: {0} ({1:.2f})\n".format(good,good*1./(good+bad)))


	Fgood.close()
	Rgood.close()
	hbad1.close()
	hbad2.close()
	hlog.close()
	hverbose.close()
	os.system("gzip " + Fgood.f.name)
	os.system("gzip " + Rgood.f.name)
	os.system("gzip " + hbad1.f.name)
	os.system("gzip " + hbad2.f.name)
	os.system("gzip " + hverbose.name)