def _create_hsp(hid, qid, hspd):
    """Return an HSP object built from the given parsed HSP values."""
    frags = []
    # we are iterating over query_ranges, but hit_ranges works just as well
    for idx, qcoords in enumerate(hspd['query_ranges']):
        # get sequences, create object
        hseqlist = hspd.get('hit')
        hseq = '' if hseqlist is None else hseqlist[idx]
        qseqlist = hspd.get('query')
        qseq = '' if qseqlist is None else qseqlist[idx]
        frag = HSPFragment(hid, qid, hit=hseq, query=qseq)
        # coordinates
        frag.query_start = qcoords[0]
        frag.query_end = qcoords[1]
        frag.hit_start = hspd['hit_ranges'][idx][0]
        frag.hit_end = hspd['hit_ranges'][idx][1]
        # alignment annotation
        try:
            aln_annot = hspd.get('aln_annotation', {})
            for key, value in aln_annot.items():
                frag.aln_annotation[key] = value[idx]
        except IndexError:
            pass
        # strands
        frag.query_strand = hspd['query_strand']
        frag.hit_strand = hspd['hit_strand']
        # and append the hsp object to the list
        if frag.aln_annotation.get('similarity') is not None:
            if '#' in frag.aln_annotation['similarity']:
                frags.extend(_split_fragment(frag))
                continue
        # try to set frame if there is a translation in the alignment
        if len(frag.aln_annotation) > 1 or \
                frag.query_strand == 0 or \
                ('vulgar_comp' in hspd and re.search(_RE_TRANS, hspd['vulgar_comp'])):
            _set_frame(frag)

        frags.append(frag)

    # if the query is protein, we need to change the hit and query sequences
    # from three-letter amino acid codes to one letter, and adjust their
    # coordinates accordingly
    if len(frags[0].aln_annotation) == 2:  # 2 annotations == protein query
        frags = _adjust_aa_seq(frags)

    hsp = HSP(frags)
    # set hsp-specific attributes
    for attr in ('score', 'hit_split_codons', 'query_split_codons',
                 'model', 'vulgar_comp', 'cigar_comp', 'alphabet'):
        if attr in hspd:
            setattr(hsp, attr, hspd[attr])

    return hsp
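# Illustrative only: a minimal sketch of the parsed-value dict that
# _create_hsp() above expects. The key names come from the lookups in the
# function; the values here are made up.
#
#     hspd = {
#         'query_ranges': [(0, 30), (35, 70)],
#         'hit_ranges': [(100, 130), (130, 165)],
#         'query': ['ATG...', 'GGC...'],   # optional, one sequence per range
#         'hit': ['ATG...', 'GGC...'],     # optional, one sequence per range
#         'query_strand': 1,
#         'hit_strand': 1,
#         'score': 256,                    # optional HSP-level attribute
#     }
#     hsp = _create_hsp('hit1', 'query1', hspd)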
def _set_qresult_hits(qresult, hit_rows=()):
    """Helper function for appending Hits without alignments into QueryResults."""
    for hit_row in hit_rows:
        hit_id, remainder = hit_row.split(' ', 1)
        # TODO: parse hit and hsp properties properly; by dealing with:
        #   - any character in the description (brackets, spaces, etc.)
        #   - possible [f] or [r] presence (for frame info)
        #   - possible presence of E2() column
        #   - possible incomplete hit_id due to column length limit
        # The current method only looks at the Hit ID, none of the above
        if hit_id not in qresult:
            frag = HSPFragment(hit_id, qresult.id)
            hsp = HSP([frag])
            hit = Hit([hsp])
            qresult.append(hit)

    return qresult
def _parse_hit(self, query_id):
    while True:
        self.line = self.handle.readline()
        if self.line.startswith('>>'):
            break

    strand = None
    hsp_list = []
    while True:
        peekline = self.handle.peekline()
        # yield hit if we've reached the start of a new query or
        # the end of the search
        if peekline.strip() in [">>><<<", ">>>///"] or \
                (not peekline.startswith('>>>') and '>>>' in peekline):
            # append last parsed_hsp['hit']['seq'] line
            if state == _STATE_HIT_BLOCK:
                parsed_hsp['hit']['seq'] += self.line.strip()
            elif state == _STATE_CONS_BLOCK:
                hsp.aln_annotation['similarity'] += \
                    self.line.strip('\r\n')
            # process HSP alignment and coordinates
            _set_hsp_seqs(hsp, parsed_hsp, self._preamble['program'])
            hit = Hit(hsp_list)
            hit.description = hit_desc
            hit.seq_len = seq_len
            yield hit, strand
            hsp_list = []
            break
        # yield hit and create a new one if we're still in the same query
        elif self.line.startswith('>>'):
            # try yielding, if we have hsps
            if hsp_list:
                _set_hsp_seqs(hsp, parsed_hsp, self._preamble['program'])
                hit = Hit(hsp_list)
                hit.description = hit_desc
                hit.seq_len = seq_len
                yield hit, strand
                hsp_list = []
            # try to get the hit id and desc, and handle cases without descs
            try:
                hit_id, hit_desc = self.line[2:].strip().split(' ', 1)
            except ValueError:
                hit_id = self.line[2:].strip().split(' ', 1)[0]
                hit_desc = ''
            # create the HSP object for Hit
            frag = HSPFragment(hit_id, query_id)
            hsp = HSP([frag])
            hsp_list.append(hsp)
            # set or reset the state to none
            state = _STATE_NONE
            parsed_hsp = {'query': {}, 'hit': {}}
        # create and append a new HSP if line starts with '>--'
        elif self.line.startswith('>--'):
            # set seq attributes of previous hsp
            _set_hsp_seqs(hsp, parsed_hsp, self._preamble['program'])
            # and create a new one
            frag = HSPFragment(hit_id, query_id)
            hsp = HSP([frag])
            hsp_list.append(hsp)
            # set the state ~ none yet
            state = _STATE_NONE
            parsed_hsp = {'query': {}, 'hit': {}}
        # this is either query or hit data in the HSP, depending on the state
        elif self.line.startswith('>'):
            if state == _STATE_NONE:
                # make sure it's the correct query
                assert query_id.startswith(self.line[1:].split(' ')[0]), \
                    "%r vs %r" % (query_id, self.line)
                state = _STATE_QUERY_BLOCK
                parsed_hsp['query']['seq'] = ''
            elif state == _STATE_QUERY_BLOCK:
                # make sure it's the correct hit
                assert hit_id.startswith(self.line[1:].split(' ')[0])
                state = _STATE_HIT_BLOCK
                parsed_hsp['hit']['seq'] = ''
        # check for conservation block
        elif self.line.startswith('; al_cons'):
            state = _STATE_CONS_BLOCK
            hsp.fragment.aln_annotation['similarity'] = ''
        elif self.line.startswith(';'):
            # Fasta outputs do not make a clear distinction between Hit
            # and HSPs, so we check the attribute names to determine
            # whether it belongs to a Hit or HSP
            regx = re.search(_RE_ATTR, self.line.strip())
            name = regx.group(1)
            value = regx.group(2)
            # for values before the '>...' query block
            if state == _STATE_NONE:
                if name in _HSP_ATTR_MAP:
                    attr_name, caster = _HSP_ATTR_MAP[name]
                    if caster is not str:
                        value = caster(value)
                    if name in ['_ident', '_sim']:
                        value *= 100
                    setattr(hsp, attr_name, value)
            # otherwise, pool the values for processing later
            elif state == _STATE_QUERY_BLOCK:
                parsed_hsp['query'][name] = value
            # for values in the hit block
            elif state == _STATE_HIT_BLOCK:
                if name == '_len':
                    seq_len = int(value)
                else:
                    parsed_hsp['hit'][name] = value
            else:
                raise ValueError("Unexpected line: %r" % self.line)
        # otherwise, it must be lines containing the sequences
        else:
            assert '>' not in self.line
            # if we're in hit, parse into hsp.hit
            if state == _STATE_HIT_BLOCK:
                parsed_hsp['hit']['seq'] += self.line.strip()
            elif state == _STATE_QUERY_BLOCK:
                parsed_hsp['query']['seq'] += self.line.strip()
            elif state == _STATE_CONS_BLOCK:
                hsp.fragment.aln_annotation['similarity'] += \
                    self.line.strip('\r\n')
            # we should not get here!
            else:
                raise ValueError("Unexpected line: %r" % self.line)

        self.line = self.handle.readline()
def _parse_qresult(self):
    """Generator function that returns QueryResult objects."""
    # state values, used to determine what to do with each line
    state_EOF = 0
    state_QRES_NEW = 1
    state_QRES_SAME = 3
    state_HIT_NEW = 2
    state_HIT_SAME = 4
    # dummies for initial states
    qres_state = None
    hit_state = None
    file_state = None
    # dummies for initial id caches
    prev_qid = None
    prev_hid = None
    # dummies for initial parsed value containers
    cur, prev = None, None
    hit_list, hsp_list = [], []

    while True:
        # store previous line's parsed values if we've passed the first line
        if cur is not None:
            prev = cur
            prev_qid = cur_qid
            prev_hid = cur_hid
        # only parse the line if it's not EOF or a comment line
        if self.line and not self.line.startswith('#'):
            cur = self._parse_result_row()
            cur_qid = self._get_id(cur['qresult'])
            cur_hid = self._get_id(cur['hit'])
        else:
            file_state = state_EOF
            # mock values for cur_qid and cur_hid since the line is empty
            cur_qid, cur_hid = None, None

        # get the state of hit and qresult
        if prev_qid != cur_qid:
            qres_state = state_QRES_NEW
        else:
            qres_state = state_QRES_SAME
        # new hits are hits with different id or hits in a new qresult
        if prev_hid != cur_hid or qres_state == state_QRES_NEW:
            hit_state = state_HIT_NEW
        else:
            hit_state = state_HIT_SAME

        # we're creating objects for the previously parsed line(s),
        # so nothing is done in the first parsed line (prev == None)
        if prev is not None:
            # every line is essentially an HSP with one fragment, so we
            # create both of these for every line
            frag = HSPFragment(prev_hid, prev_qid)
            for attr, value in prev['frag'].items():
                # adjust coordinates to Python range
                # NOTE: this requires both start and end coords to be
                # present, otherwise a KeyError will be raised.
                # Without this limitation, we might misleadingly set the
                # start / end coords
                for seq_type in ('query', 'hit'):
                    if attr == seq_type + '_start':
                        value = min(value, prev['frag'][seq_type + '_end']) - 1
                    elif attr == seq_type + '_end':
                        value = max(value, prev['frag'][seq_type + '_start'])
                setattr(frag, attr, value)

            # strand and frame setattr require the full parsed values
            # to be set first
            for seq_type in ('hit', 'query'):
                # try to set hit and query frame
                frame = self._get_frag_frame(frag, seq_type, prev['frag'])
                setattr(frag, '%s_frame' % seq_type, frame)
                # try to set hit and query strand
                strand = self._get_frag_strand(frag, seq_type, prev['frag'])
                setattr(frag, '%s_strand' % seq_type, strand)

            hsp = HSP([frag])
            for attr, value in prev['hsp'].items():
                setattr(hsp, attr, value)
            hsp_list.append(hsp)

            # create hit and append to temp hit container if hit_state
            # says we're not at the same hit or at a new query
            if hit_state == state_HIT_NEW:
                hit = Hit(hsp_list)
                for attr, value in prev['hit'].items():
                    setattr(hit, attr, value)
                hit_list.append(hit)
                hsp_list = []
            # create qresult and yield if we're at a new qresult or EOF
            if qres_state == state_QRES_NEW or file_state == state_EOF:
                qresult = QueryResult(hit_list, prev_qid)
                for attr, value in prev['qresult'].items():
                    setattr(qresult, attr, value)
                yield qresult
                # if current line is EOF, break
                if file_state == state_EOF:
                    break
                hit_list = []

        self.line = self.handle.readline().strip()
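# Illustration of the one-row-lookbehind grouping used above (and by the
# other tabular _parse_qresult variants below). The rows and IDs are made up;
# each object is only built once the *next* row shows its group has ended.
#
#     row  qid   hid    action when this row is read
#     1    q1    h1     nothing built yet (prev is None)
#     2    q1    h1     HSP for row 1 appended to hsp_list (same hit, same query)
#     3    q1    h2     Hit h1 built from hsp_list (new hit id)
#     4    q2    h3     Hit h2 built; QueryResult q1 yielded (new query id)
#     EOF               Hit h3 built; QueryResult q2 yielded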
def _parse_hsp(self, root_hsp_frag_elem, query_id, hit_id):
    """Iterator that transforms Hit_hsps XML elements into HSP objects.

    Arguments:
    root_hsp_frag_elem -- Element object of the Hit_hsps tag.
    query_id -- Query ID string.
    hit_id -- Hit ID string.

    """
    # Hit_hsps DTD:
    # <!ELEMENT Hsp (
    #        Hsp_num,
    #        Hsp_bit-score,
    #        Hsp_score,
    #        Hsp_evalue,
    #        Hsp_query-from,
    #        Hsp_query-to,
    #        Hsp_hit-from,
    #        Hsp_hit-to,
    #        Hsp_pattern-from?,
    #        Hsp_pattern-to?,
    #        Hsp_query-frame?,
    #        Hsp_hit-frame?,
    #        Hsp_identity?,
    #        Hsp_positive?,
    #        Hsp_gaps?,
    #        Hsp_align-len?,
    #        Hsp_density?,
    #        Hsp_qseq,
    #        Hsp_hseq,
    #        Hsp_midline?)>

    # if value is None, feed the loop below an empty list
    if root_hsp_frag_elem is None:
        root_hsp_frag_elem = []

    for hsp_frag_elem in root_hsp_frag_elem:
        coords = {}  # temporary container for coordinates
        frag = HSPFragment(hit_id, query_id)
        for key, val_info in _ELEM_FRAG.items():
            value = hsp_frag_elem.findtext(key)
            caster = val_info[1]

            # adjust 'from' and 'to' coordinates to 0-based ones
            if value is not None:
                if key.endswith('-from') or key.endswith('-to'):
                    # store coordinates for further processing
                    coords[val_info[0]] = caster(value)
                    continue
                # recast only if value is not intended to be str
                elif caster is not str:
                    value = caster(value)

            setattr(frag, val_info[0], value)

        # set the similarity characters into aln_annotation dict
        frag.aln_annotation['similarity'] = \
            hsp_frag_elem.findtext('Hsp_midline')

        # process coordinates
        # since 'x-from' could be bigger than 'x-to', we need to figure
        # out which one is smaller/bigger since 'x_start' is always smaller
        # than 'x_end'
        for coord_type in ('query', 'hit', 'pattern'):
            start_type = coord_type + '_start'
            end_type = coord_type + '_end'
            try:
                start = coords[start_type]
                end = coords[end_type]
            except KeyError:
                continue
            else:
                # convert to python range and setattr
                setattr(frag, start_type, min(start, end) - 1)
                setattr(frag, end_type, max(start, end))

        # set alphabet, based on program
        prog = self._meta.get('program')
        if prog == 'blastn':
            frag.alphabet = generic_dna
        elif prog in ['blastp', 'blastx', 'tblastn', 'tblastx']:
            frag.alphabet = generic_protein

        hsp = HSP([frag])
        for key, val_info in _ELEM_HSP.items():
            value = hsp_frag_elem.findtext(key)
            caster = val_info[1]
            if value is not None:
                if caster is not str:
                    value = caster(value)
                setattr(hsp, val_info[0], value)
        # delete element after we finish parsing it
        hsp_frag_elem.clear()
        yield hsp
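# Quick illustration of the coordinate handling above (values made up):
# BLAST XML reports 1-based, inclusive 'from'/'to' values that may be
# reversed on the minus strand; they are normalised to 0-based, half-open
# start/end coordinates with start < end.
#
#     start, end = 7, 3           # e.g. Hsp_hit-from=7, Hsp_hit-to=3
#     min(start, end) - 1         # -> 2  (hit_start)
#     max(start, end)             # -> 7  (hit_end)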
def _create_hits(self, hit_attrs, qid, qdesc):
    """Parses a HMMER3 hsp block, beginning with the hsp table."""
    # read through until the beginning of the hsp block
    self._read_until(lambda line: line.startswith('Internal pipeline')
                     or line.startswith('>>'))

    # start parsing the hsp block
    hit_list = []
    while True:
        if self.line.startswith('Internal pipeline'):
            # by this time we should've emptied the hit attr list
            assert len(hit_attrs) == 0
            return hit_list

        assert self.line.startswith('>>')
        hid, hdesc = self.line[len('>> '):].split(' ', 1)

        # read through the hsp table header and move one more line
        self._read_until(lambda line:
                         line.startswith(' --- ------ ----- --------') or
                         line.startswith(' [No individual domains'))
        self.line = read_forward(self.handle)

        # parse the hsp table for the current hit
        hsp_list = []
        while True:
            # break out of hsp parsing if there are no hits, it's the last hsp
            # or it's the start of a new hit
            if self.line.startswith(' [No targets detected that satisfy') or \
                    self.line.startswith(' [No individual domains') or \
                    self.line.startswith('Internal pipeline statistics summary:') or \
                    self.line.startswith(' Alignments for each domain:') or \
                    self.line.startswith('>>'):

                hit_attr = hit_attrs.pop(0)
                hit = Hit(hsp_list)
                for attr, value in hit_attr.items():
                    setattr(hit, attr, value)
                if not hit:
                    hit.query_description = qdesc
                hit_list.append(hit)
                break

            parsed = [x for x in self.line.strip().split(' ') if x]
            assert len(parsed) == 16
            # parsed column order:
            # index, is_included, bitscore, bias, evalue_cond, evalue,
            # hmmfrom, hmmto, hmm_endtype, alifrom, alito, ali_endtype,
            # envfrom, envto, env_endtype, acc_avg
            frag = HSPFragment(hid, qid)
            # HMMER3 alphabets are always protein alphabets
            frag.alphabet = generic_protein
            # depending on whether the program is hmmsearch, hmmscan, or phmmer
            # {hmm,ali}{from,to} can either be hit_{from,to} or query_{from,to}
            # for hmmscan, hit is the hmm profile, query is the sequence
            if self._meta.get('program') == 'hmmscan':
                # adjust 'from' and 'to' coordinates to 0-based ones
                frag.hit_start = int(parsed[6]) - 1
                frag.hit_end = int(parsed[7])
                frag.query_start = int(parsed[9]) - 1
                frag.query_end = int(parsed[10])
            elif self._meta.get('program') in ['hmmsearch', 'phmmer']:
                # adjust 'from' and 'to' coordinates to 0-based ones
                frag.hit_start = int(parsed[9]) - 1
                frag.hit_end = int(parsed[10])
                frag.query_start = int(parsed[6]) - 1
                frag.query_end = int(parsed[7])
            # strand is always 0, since HMMER now only handles protein
            frag.hit_strand = frag.query_strand = 0

            hsp = HSP([frag])
            hsp.domain_index = int(parsed[0])
            hsp.is_included = parsed[1] == '!'
            hsp.bitscore = float(parsed[2])
            hsp.bias = float(parsed[3])
            hsp.evalue_cond = float(parsed[4])
            hsp.evalue = float(parsed[5])
            # the endtype columns follow the same hmm/ali swap as the
            # coordinates above
            if self._meta.get('program') == 'hmmscan':
                hsp.hit_endtype = parsed[8]
                hsp.query_endtype = parsed[11]
            elif self._meta.get('program') in ['hmmsearch', 'phmmer']:
                hsp.hit_endtype = parsed[11]
                hsp.query_endtype = parsed[8]
            # adjust 'from' and 'to' coordinates to 0-based ones
            hsp.env_start = int(parsed[12]) - 1
            hsp.env_end = int(parsed[13])
            hsp.env_endtype = parsed[14]
            hsp.acc_avg = float(parsed[15])

            hsp_list.append(hsp)
            self.line = read_forward(self.handle)

        # parse the hsp alignments
        if self.line.startswith(' Alignments for each domain:'):
            self._parse_aln_block(hid, hit.hsps)
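# Illustrative sketch (not from any real HMMER3 output): a made-up 16-column
# 'parsed' row showing how the hmmscan/hmmsearch coordinate swap above works.
#
#     parsed = ['1', '!', '200.1', '0.2', '1.2e-60', '3.1e-57',
#               '1', '120', '[]', '5', '124', '..', '3', '130', '..', '0.98']
#     # hmmscan:   HMM coords (parsed[6:8]) go to the hit -> hit_start=0, hit_end=120
#     #            alignment coords (parsed[9:11]) go to the query -> query_start=4, query_end=124
#     # hmmsearch/phmmer: the same columns are read the other way around, so
#     #            the HMM coordinates become the query coordinates instead.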
def _parse_qresult(self):
    """Generator function that returns QueryResult objects."""
    # state values, determines what to do for each line
    state_EOF = 0
    state_QRES_NEW = 1
    state_QRES_SAME = 3
    # initial value dummies
    qres_state = None
    file_state = None
    prev_qid = None
    cur, prev = None, None
    # container for Hit objects, used to create QueryResult
    hit_list = []

    while True:
        # store previous line's parsed values for all lines after the first
        if cur is not None:
            prev = cur
            prev_qid = cur_qid
        # only parse the result row if it's not EOF
        # NOTE: we are not parsing the extra '#' lines appended to the end
        # of hmmer31b1 tabular results since storing them in qresult
        # objects means we can not do a single-pass parsing
        if self.line and not self.line.startswith('#'):
            cur = self._parse_row()
            cur_qid = cur['qresult']['id']
        else:
            file_state = state_EOF
            # mock value for cur_qid, since we have nothing to parse
            cur_qid = None

        if prev_qid != cur_qid:
            qres_state = state_QRES_NEW
        else:
            qres_state = state_QRES_SAME

        if prev is not None:
            # since domain tab formats only have 1 Hit per line
            # we always create HSPFragment, HSP, and Hit per line
            prev_hid = prev['hit']['id']

            # create fragment and HSP and set their attributes
            frag = HSPFragment(prev_hid, prev_qid)
            for attr, value in prev['frag'].items():
                setattr(frag, attr, value)
            hsp = HSP([frag])
            for attr, value in prev['hsp'].items():
                setattr(hsp, attr, value)

            # create Hit and set its attributes
            hit = Hit([hsp])
            for attr, value in prev['hit'].items():
                setattr(hit, attr, value)
            hit_list.append(hit)

            # create qresult and yield if we're at a new qresult or at EOF
            if qres_state == state_QRES_NEW or file_state == state_EOF:
                qresult = QueryResult(hit_list, prev_qid)
                for attr, value in prev['qresult'].items():
                    setattr(qresult, attr, value)
                yield qresult
                # if we're at EOF, break
                if file_state == state_EOF:
                    break
                hit_list = []

        self.line = self.handle.readline()
def __iter__(self):
    for rec in self.blast_iter:
        # set attributes to SearchIO's
        # get id and desc
        if rec.query.startswith('>'):
            rec.query = rec.query[1:]
        try:
            qid, qdesc = rec.query.split(' ', 1)
        except ValueError:
            qid, qdesc = rec.query, ''
        qdesc = qdesc.replace('\n', '').replace('\r', '')

        qresult = QueryResult(id=qid)
        qresult.program = rec.application.lower()
        qresult.target = rec.database
        qresult.seq_len = rec.query_letters
        qresult.version = rec.version

        # determine alphabet based on program
        if qresult.program == 'blastn':
            alphabet = generic_dna
        elif qresult.program in ['blastp', 'blastx', 'tblastn', 'tblastx']:
            alphabet = generic_protein

        # iterate over the 'alignments' (hits) and the hit table
        for idx, aln in enumerate(rec.alignments):
            # get id and desc
            if aln.title.startswith('> '):
                aln.title = aln.title[2:]
            elif aln.title.startswith('>'):
                aln.title = aln.title[1:]
            try:
                hid, hdesc = aln.title.split(' ', 1)
            except ValueError:
                hid, hdesc = aln.title, ''
            hdesc = hdesc.replace('\n', '').replace('\r', '')

            # iterate over the hsps and group them in a list
            hsp_list = []
            for bhsp in aln.hsps:
                frag = HSPFragment(hid, qid)
                frag.alphabet = alphabet
                # set alignment length
                frag.aln_span = bhsp.identities[1]
                # set frames
                try:
                    frag.query_frame = int(bhsp.frame[0])
                except IndexError:
                    if qresult.program in ('blastp', 'tblastn'):
                        frag.query_frame = 0
                    else:
                        frag.query_frame = 1
                try:
                    frag.hit_frame = int(bhsp.frame[1])
                except IndexError:
                    if qresult.program in ('blastp', 'tblastn'):
                        frag.hit_frame = 0
                    else:
                        frag.hit_frame = 1
                # set query coordinates
                frag.query_start = min(bhsp.query_start, bhsp.query_end) - 1
                frag.query_end = max(bhsp.query_start, bhsp.query_end)
                # set hit coordinates
                frag.hit_start = min(bhsp.sbjct_start, bhsp.sbjct_end) - 1
                frag.hit_end = max(bhsp.sbjct_start, bhsp.sbjct_end)
                # set query and hit sequences and their annotation
                qseq = ''
                hseq = ''
                midline = ''
                for seqtrio in zip(bhsp.query, bhsp.sbjct, bhsp.match):
                    qchar, hchar, mchar = seqtrio
                    if qchar == ' ' or hchar == ' ':
                        assert all(' ' == x for x in seqtrio)
                    else:
                        qseq += qchar
                        hseq += hchar
                        midline += mchar
                frag.query, frag.hit = qseq, hseq
                frag.aln_annotation['similarity'] = midline

                # create HSP object with the fragment
                hsp = HSP([frag])
                hsp.evalue = bhsp.expect
                hsp.bitscore = bhsp.bits
                hsp.bitscore_raw = bhsp.score
                # set gap
                try:
                    hsp.gap_num = bhsp.gaps[0]
                except IndexError:
                    hsp.gap_num = 0
                # set identity
                hsp.ident_num = bhsp.identities[0]
                hsp.pos_num = bhsp.positives[0]
                if hsp.pos_num is None:
                    hsp.pos_num = hsp[0].aln_span

                hsp_list.append(hsp)

            hit = Hit(hsp_list)
            hit.seq_len = aln.length
            hit.description = hdesc
            qresult.append(hit)

        qresult.description = qdesc
        yield qresult
def _create_hsp(hid, qid, psl):
    # protein flag
    is_protein = _is_protein(psl)
    # strand
    # if query is protein, strand is 0
    if is_protein:
        qstrand = 0
    else:
        qstrand = 1 if psl['strand'][0] == '+' else -1
    # try to get hit strand, if it exists
    try:
        hstrand = 1 if psl['strand'][1] == '+' else -1
    except IndexError:
        hstrand = 1  # hit strand defaults to plus

    # query block starts
    qstarts = _reorient_starts(psl['qstarts'], psl['blocksizes'],
                               psl['qsize'], qstrand)
    # hit block starts
    if len(psl['strand']) == 2:
        hstarts = _reorient_starts(psl['tstarts'], psl['blocksizes'],
                                   psl['tsize'], hstrand)
    else:
        hstarts = psl['tstarts']
    # set query and hit coords
    # this assumes each block has no gaps (which seems to be the case)
    assert len(qstarts) == len(hstarts) == len(psl['blocksizes'])
    query_range_all = list(
        zip(qstarts, [x + y for x, y in zip(qstarts, psl['blocksizes'])]))
    hit_range_all = list(
        zip(hstarts, [x + y for x, y in zip(hstarts, psl['blocksizes'])]))
    # check length of sequences and coordinates, all must match
    if 'tseqs' in psl and 'qseqs' in psl:
        assert len(psl['tseqs']) == len(psl['qseqs']) == \
            len(query_range_all) == len(hit_range_all)
    else:
        assert len(query_range_all) == len(hit_range_all)

    frags = []
    # iterating over query_range_all, but hit_range_all works just as well
    for idx, qcoords in enumerate(query_range_all):
        hseqlist = psl.get('tseqs')
        hseq = '' if not hseqlist else hseqlist[idx]
        qseqlist = psl.get('qseqs')
        qseq = '' if not qseqlist else qseqlist[idx]
        frag = HSPFragment(hid, qid, hit=hseq, query=qseq)
        # set alphabet
        frag.alphabet = generic_dna
        # set coordinates
        frag.query_start = qcoords[0]
        frag.query_end = qcoords[1]
        frag.hit_start = hit_range_all[idx][0]
        frag.hit_end = hit_range_all[idx][1]
        # and strands
        frag.query_strand = qstrand
        frag.hit_strand = hstrand
        frags.append(frag)

    # create hsp object
    hsp = HSP(frags)
    # check if start and end are set correctly
    assert hsp.query_start == psl['qstart']
    assert hsp.query_end == psl['qend']
    assert hsp.hit_start == psl['tstart']
    assert hsp.hit_end == psl['tend']
    # and check block spans as well
    assert hsp.query_span_all == hsp.hit_span_all == psl['blocksizes']
    # set its attributes
    hsp.match_num = psl['matches']
    hsp.mismatch_num = psl['mismatches']
    hsp.match_rep_num = psl['repmatches']
    hsp.n_num = psl['ncount']
    hsp.query_gapopen_num = psl['qnuminsert']
    hsp.query_gap_num = psl['qbaseinsert']
    hsp.hit_gapopen_num = psl['tnuminsert']
    hsp.hit_gap_num = psl['tbaseinsert']
    hsp.ident_num = psl['matches'] + psl['repmatches']
    hsp.gapopen_num = psl['qnuminsert'] + psl['tnuminsert']
    hsp.gap_num = psl['qbaseinsert'] + psl['tbaseinsert']
    hsp.query_is_protein = is_protein
    hsp.ident_pct = 100.0 - _calc_millibad(psl, is_protein) * 0.1
    hsp.score = _calc_score(psl, is_protein)
    # helper flag, for writing
    hsp._has_hit_strand = len(psl['strand']) == 2

    return hsp
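# A small worked example of the block-coordinate construction above, with
# made-up PSL values: each (start, start + blocksize) pair becomes one
# ungapped fragment range.
#
#     >>> qstarts = [0, 20, 45]
#     >>> blocksizes = [10, 15, 30]
#     >>> list(zip(qstarts, [s + b for s, b in zip(qstarts, blocksizes)]))
#     [(0, 10), (20, 35), (45, 75)]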
def _parse_qresult(self):
    """Generator function that returns QueryResult objects."""
    # state values, determines what to do for each line
    state_EOF = 0
    state_QRES_NEW = 1
    state_QRES_SAME = 3
    state_HIT_NEW = 2
    state_HIT_SAME = 4
    # dummies for initial states
    qres_state = None
    hit_state = None
    file_state = None
    # dummies for initial id caches
    prev_qid = None
    prev_hid = None
    # dummies for initial parsed value containers
    cur, prev = None, None
    hit_list, hsp_list = [], []

    while True:
        # store previous line's parsed values, for every line after the 1st
        if cur is not None:
            prev = cur
            prev_qid = cur_qid
            prev_hid = cur_hid
        # only parse the line if it's not EOF
        if self.line and not self.line.startswith('#'):
            cur = self._parse_row()
            cur_qid = cur['qresult']['id']
            cur_hid = cur['hit']['id']
        else:
            file_state = state_EOF
            # mock ID values since the line is empty
            cur_qid, cur_hid = None, None

        # get the state of hit and qresult
        if prev_qid != cur_qid:
            qres_state = state_QRES_NEW
        else:
            qres_state = state_QRES_SAME
        # new hits are hits with different ids or hits in a new qresult
        if prev_hid != cur_hid or qres_state == state_QRES_NEW:
            hit_state = state_HIT_NEW
        else:
            hit_state = state_HIT_SAME

        # start creating objects after the first line (i.e. prev is filled)
        if prev is not None:
            # each line is basically an HSP with one HSPFragment
            frag = HSPFragment(prev_hid, prev_qid)
            for attr, value in prev['frag'].items():
                setattr(frag, attr, value)
            hsp = HSP([frag])
            for attr, value in prev['hsp'].items():
                setattr(hsp, attr, value)
            hsp_list.append(hsp)

            # create hit object when we've finished parsing all its hsps
            # i.e. when hit state is state_HIT_NEW
            if hit_state == state_HIT_NEW:
                hit = Hit(hsp_list)
                for attr, value in prev['hit'].items():
                    setattr(hit, attr, value)
                hit_list.append(hit)
                hsp_list = []
            # create qresult and yield if we're at a new qresult or EOF
            if qres_state == state_QRES_NEW or file_state == state_EOF:
                qresult = QueryResult(hit_list, prev_qid)
                for attr, value in prev['qresult'].items():
                    setattr(qresult, attr, value)
                yield qresult
                # if current line is EOF, break
                if file_state == state_EOF:
                    break
                hit_list = []

        self.line = self.handle.readline()
def parse_hsps(self, hit_placeholders):
    """Parse a HMMER2 hsp block, beginning with the hsp table."""
    # HSPs may occur in different order than the hits
    # so store Hit objects separately first
    unordered_hits = {}
    while self.read_next():
        if self.line.startswith('Alignments') or \
                self.line.startswith('Histogram') or \
                self.line == '//':
            break
        if self.line.startswith('Model') or \
                self.line.startswith('Sequence') or \
                self.line.startswith('--------'):
            continue

        id_, domain, seq_f, seq_t, seq_compl, hmm_f, hmm_t, hmm_compl, \
            score, evalue = self.line.split()

        frag = HSPFragment(id_, self.qresult.id)
        frag.alphabet = generic_protein
        if self._meta['program'] == 'hmmpfam':
            frag.hit_start = int(hmm_f) - 1
            frag.hit_end = int(hmm_t)
            frag.query_start = int(seq_f) - 1
            frag.query_end = int(seq_t)
        elif self._meta['program'] == 'hmmsearch':
            frag.query_start = int(hmm_f) - 1
            frag.query_end = int(hmm_t)
            frag.hit_start = int(seq_f) - 1
            frag.hit_end = int(seq_t)

        hsp = HSP([frag])
        hsp.evalue = float(evalue)
        hsp.bitscore = float(score)
        hsp.domain_index = int(domain.split('/')[0])
        if self._meta['program'] == 'hmmpfam':
            hsp.hit_endtype = hmm_compl
            hsp.query_endtype = seq_compl
        elif self._meta['program'] == 'hmmsearch':
            hsp.query_endtype = hmm_compl
            hsp.hit_endtype = seq_compl

        if id_ not in unordered_hits:
            placeholder = [p for p in hit_placeholders if p.id_ == id_][0]
            hit = placeholder.createHit([hsp])
            unordered_hits[id_] = hit
        else:
            hit = unordered_hits[id_]
            hsp.hit_description = hit.description
            hit.append(hsp)

    # The placeholder list is in the correct order, so use that order for
    # the Hit objects in the qresult
    for p in hit_placeholders:
        self.qresult.append(unordered_hits[p.id_])