Example #1
def combine_quiver_results(split_dirs, output_dir, hq_filename, lq_filename, tofu_prefix=''):
    """
    Combine the results from each size bin (ex: clusterOut/0to2k/all.quiveredXXX.fastq)
    into a single file, adding a bin prefix to each record name (ex: i0|c12, i1|c13, ...)
    """
    prefix_dict_hq = {}
    prefix_dict_lq = {}
    fout_hq = FastqWriter(os.path.join(output_dir, 'all_sizes.quivered_hq.fastq'))
    fout_lq = FastqWriter(os.path.join(output_dir, 'all_sizes.quivered_lq.fastq'))
    for i,d in enumerate(split_dirs):
        file_hq = os.path.join(d, hq_filename)  # e.g. 'all_quivered_hq.100_30_0.99.fastq'
        file_lq = os.path.join(d, lq_filename)  # e.g. 'all_quivered_lq.fastq'
        print >> sys.stderr, "Adding prefix i{0}| to {1},{2}...".format(i, file_hq, file_lq)
        prefix_dict_hq["i{i}HQ_{p}".format(i=i,p=tofu_prefix)] = os.path.abspath(d)
        prefix_dict_lq["i{i}LQ_{p}".format(i=i,p=tofu_prefix)] = os.path.abspath(d)
        for r in FastqReader(file_hq):
            _name_ = "i{i}HQ_{p}|{n}".format(p=tofu_prefix, i=i, n=r.name)
            fout_hq.writeRecord(_name_, r.sequence, r.quality)
        for r in FastqReader(file_lq):
            _name_ = "i{i}LQ_{p}|{n}".format(p=tofu_prefix, i=i, n=r.name)
            fout_lq.writeRecord(_name_, r.sequence, r.quality)
    fout_hq.close()
    fout_lq.close()
    print >> sys.stderr, "HQ quivered output combined to:", fout_hq.file.name
    print >> sys.stderr, "LQ quivered output combined to:", fout_lq.file.name
    return fout_hq.file.name, fout_lq.file.name, prefix_dict_hq, prefix_dict_lq
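A minimal usage sketch for the function above, assuming pbcore-style FastqReader/FastqWriter imports and an existing size-bin layout; the directory names and tofu_prefix value are illustrative, borrowed from comments elsewhere on this page:

import os
from pbcore.io import FastqReader, FastqWriter  # assumed import location

# hypothetical size-bin directories like clusterOut/0to2k, clusterOut/2to4k, ...
split_dirs = [os.path.join('clusterOut', b) for b in ('0to2k', '2to4k')]
hq_fq, lq_fq, hq_prefixes, lq_prefixes = combine_quiver_results(
    split_dirs,
    output_dir='clusterOut',
    hq_filename='all_quivered_hq.100_30_0.99.fastq',
    lq_filename='all_quivered_lq.fastq',
    tofu_prefix='54b0ca')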
def pick_rep(fa_fq_filename,
             gff_filename,
             group_filename,
             output_filename,
             is_fq=False,
             pick_least_err_instead=True):
    """
    For each group, select the representative record

    If FASTA input (is_fq False) -- always pick the longest sequence.
    If FASTQ input (is_fq True) --
          if pick_least_err_instead is True, pick the one with the fewest expected base errors;
          else, pick the longest one.
    """
    if is_fq:
        fd = LazyFastqReader(fa_fq_filename)
        fout = FastqWriter(output_filename)
    else:
        fd = LazyFastaReader(fa_fq_filename)
        fout = FastaWriter(output_filename)

    coords = {}
    for line in open(gff_filename):
        # ex: chr1    PacBio  transcript      27567   29336   .       -       .       gene_id "PB.1"; transcript_id "PB.1.1";
        raw = line.strip().split('\t')
        if raw[2] == 'transcript':
            tid = raw[-1].split('; ')[1].split()[1][1:-2]
            coords[tid] = "{0}:{1}-{2}({3})".format(raw[0], raw[3], raw[4],
                                                    raw[6])

    for line in open(group_filename):
        pb_id, members = line.strip().split('\t')
        print >> sys.stderr, "Picking representative sequence for", pb_id
        best_id = None
        best_seq = None
        best_qual = None
        best_err = 9999999
        err = 9999999
        max_len = 0
        for x in members.split(','):
            if is_fq and pick_least_err_instead:
                err = sum(10**-(i / 10.) for i in fd[x].quality)  # expected errors: Phred Q -> p = 10^(-Q/10)
            if (is_fq and pick_least_err_instead and err < best_err) or (
                (not is_fq or not pick_least_err_instead)
                    and len(fd[x].sequence) >= max_len):
                best_id = x
                best_seq = fd[x].sequence
                if is_fq:
                    best_qual = fd[x].quality
                    best_err = err
                max_len = len(fd[x].sequence)

        _id_ = "{0}|{1}|{2}".format(pb_id, coords[pb_id], best_id)
        _seq_ = best_seq
        if is_fq:
            fout.writeRecord(_id_, _seq_, best_qual)
        else:
            fout.writeRecord(_id_, _seq_)
    fout.close()
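The "expected base errors" above follow directly from the Phred convention: a base with quality Q has error probability 10**(-Q/10), so a read's expected error count is the sum over its quality values. A standalone sketch (the helper name is ours, not from this code):

def expected_errors(quality_values):
    # Phred: Q = -10 * log10(p)  =>  p = 10 ** (-Q / 10)
    return sum(10 ** -(q / 10.) for q in quality_values)

# ten bases at Q20 (p = 0.01 each) -> 0.1 expected errors
assert abs(expected_errors([20] * 10) - 0.1) < 1e-9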
def pick_rep(fa_fq_filename, gff_filename, group_filename, output_filename, is_fq=False, pick_least_err_instead=False, bad_gff_filename=None):
    """
    For each group, select the representative record

    If FASTA input (is_fq False) -- always pick the longest sequence.
    If FASTQ input (is_fq True) --
          if pick_least_err_instead is True, pick the one with the fewest expected base errors;
          else, pick the longest one.
    """
    if is_fq:
        fd = LazyFastqReader(fa_fq_filename)
        fout = FastqWriter(output_filename)
    else:
        fd = LazyFastaReader(fa_fq_filename)
        fout = FastaWriter(output_filename)

    coords = {}
    for line in open(gff_filename):
        # ex: chr1    PacBio  transcript      27567   29336   .       -       .       gene_id "PB.1"; transcript_id "PB.1.1";
        raw = line.strip().split('\t')
        if raw[2] == 'transcript': 
            tid = raw[-1].split('; ')[1].split()[1][1:-2]
            coords[tid] = "{0}:{1}-{2}({3})".format(raw[0], raw[3], raw[4], raw[6])

    if bad_gff_filename is not None:
        for line in open(bad_gff_filename):
            raw = line.strip().split('\t')
            if raw[2] == 'transcript':
                tid = raw[-1].split('; ')[1].split()[1][1:-2]
                coords[tid] = "{0}:{1}-{2}({3})".format(raw[0], raw[3], raw[4], raw[6])

    for line in open(group_filename):
        pb_id, members = line.strip().split('\t')
        print >> sys.stderr, "Picking representative sequence for", pb_id
        best_id = None
        best_seq = None
        best_qual = None
        best_err = 9999999
        err = 9999999
        max_len = 0
        for x in members.split(','):
            if is_fq and pick_least_err_instead:
                err = sum(10**-(i/10.) for i in fd[x].quality)  # expected errors: Phred Q -> p = 10^(-Q/10)
            if (is_fq and pick_least_err_instead and err < best_err) or ((not is_fq or not pick_least_err_instead) and len(fd[x].sequence) >= max_len):
                best_id = x
                best_seq = fd[x].sequence
                if is_fq:
                    best_qual = fd[x].quality
                    best_err = err
                max_len = len(fd[x].sequence)

        _id_ = "{0}|{1}|{2}".format(pb_id, coords[pb_id], best_id)
        _seq_ = best_seq
        if is_fq:
            fout.writeRecord(_id_, _seq_, best_qual)
        else:
            fout.writeRecord(_id_, _seq_)
    fout.close()
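The tid extraction above is easiest to follow on the sample GFF line from the comment; a quick trace with plain string operations:

line = 'chr1\tPacBio\ttranscript\t27567\t29336\t.\t-\t.\tgene_id "PB.1"; transcript_id "PB.1.1";'
raw = line.strip().split('\t')
attr = raw[-1]               # 'gene_id "PB.1"; transcript_id "PB.1.1";'
field = attr.split('; ')[1]  # 'transcript_id "PB.1.1";'
token = field.split()[1]     # '"PB.1.1";'
tid = token[1:-2]            # strips the opening quote and the trailing '";' -> 'PB.1.1'
assert tid == 'PB.1.1'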
Example #5
class QualityFilter(object):
    """
    A tool for filtering out low-quality FASTQ records
    """
    def __init__(self, input_fastq, output_fastq, min_accuracy=MIN_ACCURACY):
        self.input_reader = FastqReader(input_fastq)
        self.output_writer = FastqWriter(output_fastq)
        self.min_accuracy = min_accuracy

    def __call__(self):
        for fastq in self.input_reader:
            if predicted_accuracy(fastq) >= self.min_accuracy:
                self.output_writer.writeRecord(fastq)
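predicted_accuracy is not defined in this snippet. One plausible stand-in (an assumption on our part, not the source's definition) is the mean per-base accuracy implied by the Phred qualities:

def predicted_accuracy(record):
    # hypothetical helper: mean of per-base accuracy 1 - 10^(-Q/10)
    quals = record.quality
    if len(quals) == 0:
        return 0.0
    return sum(1 - 10 ** -(q / 10.) for q in quals) / float(len(quals))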
Example #8
class BarcodeTrimmer(object):
    def __init__(self, input_file, barcode_file, prefix=None, filetype=None):
        self.input_file = input_file
        self.barcode_file = barcode_file
        self.prefix = prefix or get_prefix(input_file)
        self.filetype = filetype or get_filetype(input_file)
        self.positions = {}

    def run(self):
        self.parse_barcode_data()
        self.open_reader()
        self.open_writer()
        self.trim_sequences()

    def parse_barcode_data(self):
        with open(self.barcode_file) as handle:
            for entry in map(barcode._make, csv.reader(handle,
                                                       delimiter='\t')):
                if entry.id == 'ID':
                    continue
                start = None if entry.end5 == 'NA' else int(entry.end5)
                end = None if entry.end3 == 'NA' else int(entry.end3)
                self.positions[entry.id] = (start, end)

    def open_reader(self):
        if self.filetype == 'fasta':
            self.reader = FastaReader(self.input_file)
        elif self.filetype == 'fastq':
            self.reader = FastqReader(self.input_file)

    def open_writer(self):
        if self.filetype == 'fasta':
            output_file = '%s.trim.fasta' % self.prefix
            self.writer = FastaWriter(output_file)
        elif self.filetype == 'fastq':
            output_file = '%s.trim.fastq' % self.prefix
            self.writer = FastqWriter(output_file)

    def trim_sequences(self):
        for record in self.reader:
            try:
                start, end = self.positions[record.name]
            except KeyError:
                msg = 'Unknown sequence record "%s"!' % record.name
                log.error(msg)
                raise ValueError(msg)
            trimmed_record = trim_record(record, start, end)
            self.writer.writeRecord(trimmed_record)
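The class leans on helpers defined elsewhere: a barcode namedtuple whose fields match the TSV columns, and a trim_record that cuts a record down to the region between the barcode positions. Minimal stand-ins, with the column set guessed from the attributes the parser reads (id, end5, end3):

from collections import namedtuple

barcode = namedtuple('barcode', ['id', 'end5', 'end3'])  # guessed column set

def trim_record(record, start, end):
    # slice the record to [start:end]; None on either side means 'keep that end'
    if hasattr(record, 'quality'):  # FASTQ record
        return type(record)(record.name, record.sequence[start:end], record.quality[start:end])
    return type(record)(record.name, record.sequence[start:end])  # FASTA record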
Example #9
def pick_rep(fa_fq_filename,
             sam_filename,
             gff_filename,
             group_filename,
             output_filename,
             is_fq=False,
             pick_least_err_instead=False):
    """
    For each group, select the representative record

    If FASTA input (is_fq False) -- always pick the longest sequence.
    If FASTQ input (is_fq True) --
          if pick_least_err_instead is True, pick the one with the fewest expected base errors;
          else, pick the longest one.
    """
    if is_fq:
        fd = LazyFastqReader(fa_fq_filename)
        fout = FastqWriter(output_filename)
    else:
        fd = LazyFastaReader(fa_fq_filename)
        fout = FastaWriter(output_filename)


#    for line in open(gff_filename):
#        # ex: chr1    PacBio  transcript      27567   29336   .       -       .       gene_id "PBfusion.1"; transcript_id "PBfusion.1.1";
#        raw = line.strip().split('\t')
#        if raw[2] == 'transcript':
#            # check if this is first or 2+ part of fusion
#            tid = raw[-1].split('; ')[1].split()[1][1:-2] # ex: tid = PBfusion.1.1
#            gid = tid[:tid.rfind('.')] # ex: gid = PBfusion.1
#            if tid.endswith('.1'):
#                coords[gid] = "{0}:{1}-{2}({3})".format(raw[0], raw[3], raw[4], raw[6])
#            else:
#                assert gid in coords
#                coords[gid] += "+{0}:{1}-{2}({3})".format(raw[0], raw[3], raw[4], raw[6])

    rep_info = {}
    id_to_rep = {}
    for line in open(group_filename):
        pb_id, members = line.strip().split('\t')
        print >> sys.stderr, "Picking representative sequence for", pb_id
        best_id = None
        best_seq = None
        best_qual = None
        best_err = 9999999
        err = 9999999
        max_len = 0
        for x in members.split(','):
            if is_fq and pick_least_err_instead:
                err = sum(10**-(i / 10.) for i in fd[x].quality)  # expected errors: Phred Q -> p = 10^(-Q/10)
            if (is_fq and pick_least_err_instead and err < best_err) or (
                (not is_fq or not pick_least_err_instead)
                    and len(fd[x].sequence) >= max_len):
                best_id = x
                best_seq = fd[x].sequence
                if is_fq:
                    best_qual = fd[x].quality
                    best_err = err
                max_len = len(fd[x].sequence)
        rep_info[pb_id] = (best_id, best_seq, best_qual)
        id_to_rep[best_id] = pb_id

    f_gff = open(gff_filename, 'w')
    coords = {}
    record_storage = {}  # temporary storage for the .1 record, written together with the second record
    for r in BioReaders.GMAPSAMReader(sam_filename, True):
        if r.qID in id_to_rep:
            pb_id = id_to_rep[r.qID]
            best_id, best_seq, best_qual = rep_info[pb_id]

            # make coordinates & write the SAM file
            if r.qID not in coords:
                # this is the .1 portion
                coords[r.qID] = "{0}:{1}-{2}({3})".format(
                    r.sID, r.sStart, r.sEnd, r.flag.strand)
                isoform_index = 1
                record_storage[pb_id] = r
            else:
                # this is the .2 portion
                coords[r.qID] += "+{0}:{1}-{2}({3})".format(
                    r.sID, r.sStart, r.sEnd, r.flag.strand)
                isoform_index = 1

                old_r = record_storage[pb_id]
                f_gff.write("{chr}\tPacBio\ttranscript\t{s}\t{e}\t.\t{strand}\t.\tgene_id \"{pi}\"; transcript_id \"{pi}.{j}\";\n".format(\
                    chr=old_r.sID, s=old_r.segments[0].start+1, e=old_r.segments[-1].end, pi=pb_id, j=isoform_index, strand=old_r.flag.strand))
                for s in old_r.segments:
                    f_gff.write("{chr}\tPacBio\texon\t{s}\t{e}\t.\t{strand}\t.\tgene_id \"{pi}\"; transcript_id \"{pi}.{j}\";\n".format(\
                        chr=old_r.sID, s=s.start+1, e=s.end, pi=pb_id, j=isoform_index, strand=old_r.flag.strand))
                isoform_index = 2
                f_gff.write("{chr}\tPacBio\ttranscript\t{s}\t{e}\t.\t{strand}\t.\tgene_id \"{pi}\"; transcript_id \"{pi}.{j}\";\n".format(\
                    chr=r.sID, s=r.segments[0].start+1, e=r.segments[-1].end, pi=pb_id, j=isoform_index, strand=r.flag.strand))
                for s in r.segments:
                    f_gff.write("{chr}\tPacBio\texon\t{s}\t{e}\t.\t{strand}\t.\tgene_id \"{pi}\"; transcript_id \"{pi}.{j}\";\n".format(\
                        chr=r.sID, s=s.start+1, e=s.end, pi=pb_id, j=isoform_index, strand=r.flag.strand))
    f_gff.close()

    for pb_id in rep_info:
        best_id, best_seq, best_qual = rep_info[pb_id]
        _id_ = "{0}|{1}|{2}".format(pb_id, coords[best_id], best_id)
        _seq_ = best_seq
        if is_fq:
            fout.writeRecord(_id_, _seq_, best_qual)
        else:
            fout.writeRecord(_id_, _seq_)
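For a two-part fusion, coords accumulates into the '+'-joined form that the commented-out parser above expects. Tracing with the coordinates from the earlier sample GFF line plus a second, made-up locus:

# after the .1 alignment:  coords[qID] == 'chr1:27567-29336(-)'
# after the .2 alignment:  coords[qID] == 'chr1:27567-29336(-)+chr4:1280-3450(+)'  (chr4 span invented)
# so the output record ID looks like 'PBfusion.1|chr1:27567-29336(-)+chr4:1280-3450(+)|<best_id>'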
def main():
    from argparse import ArgumentParser
    parser = ArgumentParser()
    parser.add_argument("input_prefix", help="Input prefix")
    parser.add_argument("output_prefix", help="Output prefix")
    parser.add_argument("--fuzzy_junction", type=int, default=5, help="Fuzzy junction max dist (default: 5bp)")

    args = parser.parse_args()

    #group_filename = args.input_prefix + '.group.txt'
    count_filename = args.input_prefix + '.abundance.txt'
    gff_filename = args.input_prefix + '.gff'
    rep_filename = args.input_prefix + '.rep.fq'

    recs = defaultdict(list)
    reader = GFF.collapseGFFReader(gff_filename)
    for r in reader:
        assert r.seqid.startswith('PB.')
        recs[int(r.seqid.split('.')[1])].append(r)

    good = []
    f = open(args.output_prefix + '.gff', 'w')
    keys = recs.keys()
    keys.sort()
    for k in keys:
        xxx = recs[k]
        filter_out_subsets(xxx, args.fuzzy_junction)
        for r in xxx:
            GFF.write_collapseGFF_format(f, r)
            good.append(r.seqid)
    f.close()

    # read abundance first
    f = open(count_filename)
    count_header = ''
    while True:
        cur_pos = f.tell()
        line = f.readline()
        if not line.startswith('#'):
            f.seek(cur_pos)
            break
        else:
            count_header += line
    d = dict((r['pbid'], r) for r in DictReader(f, delimiter='\t'))
    for k,v in d.iteritems():
        print k,v
    f.close()

    # write output rep.fq
    f = FastqWriter(args.output_prefix + '.rep.fq')
    for r in FastqReader(rep_filename):
        if r.name.split('|')[0] in good:
            f.writeRecord(r)
    f.close()

    # write output to .abundance.txt
    f = open(args.output_prefix + '.abundance.txt', 'w')
    f.write(count_header)
    writer = DictWriter(f, fieldnames=['pbid','count_fl','count_nfl','count_nfl_amb','norm_fl','norm_nfl','norm_nfl_amb'], \
                        delimiter='\t', lineterminator='\n')
    writer.writeheader()
    for k in good:
        r = d[k]
        writer.writerow(r)
    f.close()
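This main expects to run as a script; a minimal entry point (the script name below is hypothetical):

if __name__ == '__main__':
    # e.g.: python filter_subset_isoforms.py my_prefix my_prefix.filtered --fuzzy_junction 5
    # reads  my_prefix.gff, my_prefix.abundance.txt, my_prefix.rep.fq
    # writes my_prefix.filtered.gff, .abundance.txt, .rep.fq
    main()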
Example #12
def filter_by_count(input_prefix, output_prefix, min_count):

    group_filename = input_prefix + ".group.txt"
    count_filename = input_prefix + ".abundance.txt"
    gff_filename = input_prefix + ".gff"
    rep_filename = input_prefix + ".rep.fq"

    # read group
    group_max_count_fl = {}
    group_max_count_p = {}
    f = open(group_filename)
    for line in f:
        # ex: PB.1.1  i0HQ_54b0ca|c58773/f30p16/700
        pbid, members = line.strip().split("\t")
        group_max_count_fl[pbid] = 0
        group_max_count_p[pbid] = 0
        members = members.split(",")
        for m in members:
            tmp = m.split("|")[1].split("/")[1]  # ex: tmp = f30p16
            fl_count, p_count = tmp.split("p")
            fl_count = int(fl_count[1:])
            p_count = int(p_count)
            group_max_count_fl[pbid] = max(group_max_count_fl[pbid], fl_count)
            group_max_count_p[pbid] = max(group_max_count_p[pbid], p_count)
    f.close()

    # read abundance first
    f = open(count_filename)
    count_header = ""
    while True:
        cur_pos = f.tell()
        line = f.readline()
        if not line.startswith("#"):
            f.seek(cur_pos)
            break
        else:
            count_header += line
    d = dict((r["pbid"], r) for r in DictReader(f, delimiter="\t"))
    for k, v in d.iteritems():
        print k, v
    f.close()

    # group_max_count_p NOT used for now
    good = filter(
        lambda x: int(d[x]["count_fl"]) >= min_count
        and group_max_count_fl[x] >= min_count
        and group_max_count_p[x] >= 0,
        d,
    )

    # write output GFF
    f = open(output_prefix + ".gff", "w")
    for r in GFF.collapseGFFReader(gff_filename):
        if r.seqid in good:
            GFF.write_collapseGFF_format(f, r)
    f.close()

    # write output rep.fq
    f = FastqWriter(output_prefix + ".rep.fq")
    for r in FastqReader(rep_filename):
        if r.name.split("|")[0] in good:
            f.writeRecord(r)
    f.close()

    # write output to .abundance.txt
    f = open(output_prefix + ".abundance.txt", "w")
    f.write(count_header)
    writer = DictWriter(
        f,
        fieldnames=["pbid", "count_fl", "count_nfl", "count_nfl_amb", "norm_fl", "norm_nfl", "norm_nfl_amb"],
        delimiter="\t",
        lineterminator="\n",
    )
    writer.writeheader()
    for k in good:
        r = d[k]
        writer.writerow(r)
    f.close()
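The member names encode full-length and partial read counts directly; tracing the parse on the example from the comment:

m = 'i0HQ_54b0ca|c58773/f30p16/700'
tmp = m.split('|')[1].split('/')[1]  # 'f30p16'
fl_count, p_count = tmp.split('p')   # 'f30', '16'
fl_count = int(fl_count[1:])         # 30 full-length reads
p_count = int(p_count)               # 16 partial reads
assert (fl_count, p_count) == (30, 16)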