import os
import re
from itertools import combinations_with_replacement
from subprocess import Popen, PIPE

import pandas as pd
from bson.objectid import ObjectId
from pymongo.cursor import Cursor
from Bio import SeqIO
from Bio.Blast import NCBIXML
from skbio import DistanceMatrix, tree

import KvDataStructures as kv  # local helper module (import name assumed)


def core_hgt_blast(perc_identity='99'):
    """
    Blasts all core genomes against the core BLAST database.

    - Set `perc_identity` if desired (default = '99')
    """
    if not os.path.isdir('blast_results/core/'):
        os.makedirs('blast_results/core/')

    for species in kv.get_collection('core').distinct('species'):
        query_fasta = 'blast_results/core/{}_tmp.fna'.format(species)
        with open(query_fasta, 'w+') as query_handle:
            for query in kv.get_collection('core').find({'species': species}):
                if query['type'] == 'gene':
                    query_handle.write('>{0}|{1}\n{2}\n'.format(
                        query['species'],
                        query['_id'],
                        query['dna_seq']
                        )
                    )

        print 'Blasting {0}'.format(species)
        out = Popen(
            ['blastn',
            '-query', query_fasta,
            '-db', 'blast_databases/core',
            '-outfmt', '5',
            '-out', 'blast_results/core/{}_{}_blast.xml'.format(species, perc_identity),
            '-perc_identity', perc_identity
            ],
            stdout=PIPE
        ).communicate()[0]
        os.remove(query_fasta)
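# The BLAST steps are meant to run in sequence; a usage sketch (assumes the
# working directory is the project's output directory):
#
#   make_blast_db('core')               # build blast_databases/core from the 'core' collection
#   core_hgt_blast(perc_identity='99')  # writes blast_results/core/<species>_99_blast.xml
#   blast_to_db(db='core', perc_identity='99')  # parses the XML into the 'hits' collection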
def ssu_fasta():
    with open('16s.fna', 'w+') as out_handle:
        for species in kv.get_collection('16S').distinct('species'):
            ssu = kv.get_collection('16S').find_one({'species': species})
            if ssu:
                out_handle.write(kv.make_gene_fasta(ssu, to_file=False))
            else:
                print species
def make_indexed_fasta(species):
    if not os.path.isdir('fastas/'):
        os.makedirs('fastas/')

    id_list = []
    # two passes over the collection, so grab a fresh cursor for each
    indexed_species = kv.index_contigs(kv.get_collection(species))
    indexed_species2 = kv.index_contigs(kv.get_collection(species))
    fasta = 'fastas/{}_indexed.fna'.format(species)

    for record in indexed_species:
        id_list.append(record['_id'])

    if not os.path.isfile(fasta):
        with open(fasta, 'w+') as output_handle:
            for record in indexed_species2:
                output_handle.write(
                    ">{}|{}\n{}\n".format(
                        record['species'].replace(' ', '_'),
                        record['_id'],
                        record['dna_seq'])
                )

    # `output_loc_hist()` unpacks two values from this function
    return fasta, id_list
def get_groups():
    all_hits = kv.get_collection('hits')
    groups_list = []
    for h in all_hits.find():
        current_species = h['species']
        # if any([x in current_species for x in dutton_list] or [y in current_species for y in wolfe_list]):
        current_species_islands = get_islands(current_species)
        # each sublist represents one island...
        for island in current_species_islands:
            hit_set = set()  # container for hits
            for gene_id in island:
                gene_hits = h['hits'][gene_id[1]]
                # Pull each hit id tuple, then add it to hit_set
                for hit in gene_hits:
                    hit_set.add((hit[0], hit[1]))
            # add id tuples for hits to the island (islands are sets)...
            island.update(hit_set)
            # ...and add the new island (with multiple species) to groups_list
            groups_list.append(list(island))

    # Since each species' islands are built independently, there's a lot of
    # redundancy, so collapse lists that contain shared elements and deduplicate
    return map(list, collapse_lists(groups_list))
def core_hgt_groups(perc_identity='99'):
    """
    Returns multispecies groups of genes as a list of lists.

    - Starts with each species' islands from `get_islands()`
    - For each island, if a hit from that island is in another island, group them together
    """
    all_hits = kv.get_collection('hits')
    groups_list = []
    for s in all_hits.distinct('species'):
        s_hits = all_hits.find_one({'species': s})
        current_species_islands = get_islands(s, perc_identity)
        # each sublist represents one island...
        for island in current_species_islands:
            if island:  # many islands are empty, skip those
                hit_set = set()  # container for hits
                for gene_id in island:
                    gene_hits = s_hits['core_hits_{}'.format(perc_identity)][gene_id[1]]
                    # Pull each hit id tuple, then add it to hit_set
                    for hit in gene_hits:
                        hit_set.add((hit[0], hit[1]))
                # add id tuples for hits to the island (islands are sets)...
                island.update(hit_set)
                # ...and add the new island (with multiple species) to groups_list
                groups_list.append(list(island))

    # Since each species' islands are built independently, there's a lot of
    # redundancy, so collapse lists that contain shared elements and deduplicate
    return map(list, collapse_lists(groups_list))
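# Downstream consumers (other_blast(), get_links(), group_hits()) sort the
# groups by size, so "group N" refers to the Nth-largest group, e.g.:
#
#   groups = sorted(core_hgt_groups(), key=len, reverse=True)
#   largest_group = groups[0]  # list of (species, gene_id) tuples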
def core_hgt_stats(perc_identity='99'):
    """
    Writes stats of HGT (number of events etc.) to `stats.csv`
    """
    collection = kv.get_collection('core')
    df_index = ['Total_CDS', 'HGT_CDS', 'Islands']
    df = pd.DataFrame()
    for species in collection.distinct('species'):
        hits = kv.get_collection('hits').find_one({'species': species})['core_hits_{}'.format(perc_identity)]
        series = pd.Series([
            sum([1 for x in collection.find({'species': species})]),
            sum([1 for x in hits if hits[x]]),
            len(get_islands(species, perc_identity))
            ],
            name=species,
            index=df_index
        )
        df = df.append(series)
    df.to_csv('stats.csv', columns=df_index)
def output_distance_matrix(core=False, to_file=True):
    all_species = kv.get_collection('core').distinct('species')
    if not core:
        all_species.extend(kv.get_collection('other').distinct('species'))

    distance_matrix = pd.DataFrame(data={n: 0.0 for n in all_species}, index=all_species)
    for pair in combinations_with_replacement(all_species, 2):
        distance = get_16S_distance(pair[0], pair[1])
        distance_matrix[pair[0]][pair[1]] = distance
        distance_matrix[pair[1]][pair[0]] = distance

    if to_file:
        distance_matrix.to_csv('distance_matrix.csv')
    else:
        return distance_matrix
def make_species_fasta(species):
    if not os.path.isdir('fastas'):
        os.makedirs('fastas')

    fasta = 'fastas/{}.fna'.format(species)
    if not os.path.isfile(fasta):
        with open(fasta, 'w+') as output_handle:
            for record in kv.get_collection(species).find():
                output_handle.write(
                    ">{}|{}\n{}\n".format(
                        record['species'].replace(' ', '_'),
                        record['_id'],
                        record['dna_seq'])
                )

    # `output_loc_hist()` expects the fasta path back
    return fasta
def output_all_16S():
    print "Making fasta of all 16S in database {}".format(kv.db.name)
    with open('{}_16S.fna'.format(kv.db.name), 'w+') as output_handle:
        for record in kv.get_collection('16S').find():
            output_handle.write(
                '>{0}\n{1}\n'.format(
                    record['species'],
                    record['dna_seq'],
                )
            )
def get_distance_matrix(core=False, to_file=True):
    all_species = kv.get_collection('core').distinct('species')
    if not core:
        all_species.extend(kv.get_collection('other').distinct('species'))

    # only species with a 16S record can be compared
    ssu_species = [n for n in all_species if kv.db['16S'].find_one({'species': n})]

    distance_matrix = pd.DataFrame(data={n: 0.0 for n in ssu_species}, index=ssu_species, columns=ssu_species)
    for pair in combinations_with_replacement(ssu_species, 2):
        distance = get_16S_distance(pair[0], pair[1])
        if distance:
            distance_matrix[pair[0]][pair[1]] = distance
            distance_matrix[pair[1]][pair[0]] = distance

    if to_file:
        distance_matrix.to_csv('distance_matrix.csv')
    else:
        return distance_matrix
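# `get_16S_distance()` is used above but not defined in this module. A minimal
# sketch of one way to implement it with Biopython's pairwise2; the simple
# match/mismatch scoring here is an assumption, not the project's actual method:
def get_16S_distance_sketch(species_1, species_2):
    from Bio import pairwise2
    s1 = kv.get_collection('16S').find_one({'species': species_1})
    s2 = kv.get_collection('16S').find_one({'species': species_2})
    if not (s1 and s2):
        return None  # mirrors the `if distance:` guard in get_distance_matrix()
    # globalxx: global alignment, 1 per match, no gap penalties
    aln = pairwise2.align.globalxx(s1['dna_seq'], s2['dna_seq'], one_alignment_only=True)[0]
    matches = sum(1 for a, b in zip(aln[0], aln[1]) if a == b)
    return 1.0 - float(matches) / len(aln[0])  # proportion of mismatched columns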
def group_hits(core=False):
    all_species = kv.get_collection('core').distinct('species')
    if not core:
        all_species.extend(kv.get_collection('other').distinct('species'))

    hits_db = kv.get_collection('hits')
    species_index = sorted(all_species)
    print species_index

    df = pd.DataFrame()
    core_groups = sorted(core_hgt_groups(), key=len, reverse=True)
    for group in sorted(hits_db.distinct('group')):
        recorded = []
        s = {sp: 0.0 for sp in species_index}
        for hit in core_groups[group - 1]:
            if hit not in recorded:
                s[hit[0]] += len(kv.get_mongo_record(*hit)['dna_seq'])
                recorded.append(hit)
        for hit in hits_db.find_one({'group': group})['group_hits']:
            # only count strong hits: >90% identity over >100 bp
            if float(hit[2]) > 90 and float(hit[3]) > 100:
                if hit[1] not in recorded:
                    s[kv.fasta_id_parse(hit[1])[0]] += float(hit[2]) * float(hit[3]) / 100
                    recorded.append(hit[1])
        s = pd.Series(s, name='group_{}'.format(group))
        df['group_{}'.format(group)] = s
    df.to_csv('group_hits_other.csv')

# if __name__ == '__main__':
#     import os
#     kv.mongo_init('pacbio2')
#     os.chdir('/Users/KBLaptop/computation/kvasir/data/output/pacbio2/')
#     # group_hits(core=True)
#     # output_groups()
#     # core_hgt_stats()
#     output_hits_csv()
def get_tree(core=False, newick=False):
    core_collection = kv.get_collection('core')
    all_species = core_collection.distinct('species')
    if not core:
        other_collection = kv.get_collection('other')
        all_species.extend(other_collection.distinct('species'))

    # only species with a 16S record can be placed in the tree
    ssu_species = [n for n in all_species if kv.db['16S'].find_one({'species': n})]

    dm = DistanceMatrix(get_distance_matrix(core=core, to_file=False), ssu_species)
    t = tree.nj(dm)
    print t.ascii_art()

    tips = []
    for node in t.tips():
        print node.name, node.length
        tips.append(node.name.replace(' ', '_'))

    if newick:
        n = tree.nj(dm, result_constructor=str)
        print n
    else:
        return (t, tips)
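# Usage sketch:
#   t, tips = get_tree()   # skbio TreeNode plus underscore-joined tip names
#   get_tree(newick=True)  # prints a newick string instead of returning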
def other_blast():
    groups_list = core_hgt_groups()
    groups_list.sort(key=len, reverse=True)

    for i in range(len(groups_list)):
        group_hits = []
        # make_id_list_fasta() presumably writes the group's genes to `tmp.fna`
        kv.make_id_list_fasta(groups_list[i], 'core')
        results = blast_vs_db('tmp.fna', 'blast_databases/other')
        hits_collection = kv.get_collection('hits')
        if results:
            for j in range(len(results)):
                group_hits.append(results[j])
        hits_collection.insert_one({'group': (i + 1), 'group_hits': group_hits})
def pair_compare(species_1, species_2):
    shared_CDS = 0
    shared_nt = 0

    s1_genes = kv.get_collection('hits').find_one({'species': species_1})
    for gene in s1_genes['hits']:
        if s1_genes['hits'][gene]:
            for hit in s1_genes['hits'][gene]:
                if hit[0] == species_2:
                    shared_CDS += 1
                    species_2_record = kv.get_mongo_record(hit[0], hit[1])
                    hit_loc = species_2_record['location']
                    shared_nt += hit_loc['end'] - hit_loc['start']
    return shared_CDS, shared_nt
def blast_to_db(db='core', perc_identity='99'):
    blast_dir = 'blast_results/{}/'.format(db)
    for f in os.listdir(blast_dir):
        if f.endswith('{}_blast.xml'.format(perc_identity)):
            file_handle = 'blast_results/{}/{}'.format(db, f)
            with open(file_handle, 'r') as result_handle:
                blast_records = NCBIXML.parse(result_handle)
                hits_dict = {}
                for blast_record in blast_records:
                    query_parse = re.search(r'(\w+)\|(\w+)', blast_record.query)
                    query_genus_parse = re.match(r'([A-Za-z]+)_', blast_record.query)
                    query_genus = query_genus_parse.group(1)
                    query_name = query_parse.group(1)
                    query_id = query_parse.group(2)

                    hits_dict[query_id] = []

                    for alignment in blast_record.alignments:
                        hit_parse = re.search(r'(\w+)\|(\w+)', alignment.hit_def)
                        hit_genus_parse = re.match(r'([A-Za-z]+)_', alignment.hit_def)
                        hit_genus = hit_genus_parse.group(1)
                        hit_name = hit_parse.group(1)
                        hit_id = hit_parse.group(2)

                        if query_name == hit_name:
                            pass
                        elif query_genus == hit_genus:
                            print "Oops! {} and {} are the same genus, skipping...".format(query_name, hit_name)
                        elif kv.get_mongo_record(hit_name, hit_id)['type'] == '16S':
                            print 'Skipping 16S hit'
                        else:
                            print '=======\nhit for {0} detected:\nspecies: {1}\n======='.format(query_name, hit_name)
                            hits_dict[query_id].append((hit_name, hit_id))

                print 'Updating mongoDB with hits'
                hits_collection = kv.get_collection('hits')
                hits_collection.update_one(
                    {'species': query_name},
                    {'$set': {'{}_hits_{}'.format(db, perc_identity): {x: hits_dict[x] for x in hits_dict if hits_dict[x]}}},
                    upsert=True
                )
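# After blast_to_db() runs, each species document in the 'hits' collection
# should look roughly like this (shape inferred from the update above):
#
#   {
#       'species': 'Some_species',
#       'core_hits_99': {
#           '<query gene ObjectId str>': [('Other_species', '<hit ObjectId str>'), ...],
#       }
#   }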
def add_ssu(supp_file):
    # The ssu.csv input was originally built from the supplementary table like so:
    # df = pd.read_csv(supp_file)
    # print df.columns
    # new_df = pd.DataFrame()
    # strain = pd.Series([df['Strain'][i].replace(' ', '_').replace('.', '') for i in range(len(df['Strain']))], name='strain')
    # ssus = df['sequences of the 16s rRNA genes']
    # ssu = pd.Series([ssus[i].replace(r'\n', '') if not pd.isnull(ssus[i]) else None for i in range(len(ssus))], name='16S')
    # new_df['strain'] = strain
    # new_df['16S'] = ssu
    # new_df.to_csv('ssu.csv')

    ssu_df = pd.read_csv(supp_file)
    for i in range(len(ssu_df['strain'])):
        print ssu_df['strain'][i], ssu_df['16S'][i]
        if not pd.isnull(ssu_df['16S'][i]):
            gene_record = {
                'species': ssu_df['strain'][i],
                'location': {
                    'contig': None,
                    'start': None,
                    'end': None,
                    'strand': None,
                },
                'annotation': 'Small subunit ribosomal RNA',
                'dna_seq': ssu_df['16S'][i],
                'kvtag': None,
                'type': '16S'
            }
            print "adding 16S gene!"
            # replace any existing 16S record for this strain
            kv.get_collection('16S').remove({'species': ssu_df['strain'][i]})
            print kv.get_collection('16S').find_one({'species': ssu_df['strain'][i]})
            kv.get_collection('16S').insert_one(gene_record)
            print kv.get_collection('16S').find_one({'species': ssu_df['strain'][i]})
def get_links(group=None, perc_identity='99'):
    hits_collection = kv.get_collection('hits')
    group_hits = None

    if not os.path.isdir('circos/links/'):
        os.makedirs('circos/links/')

    out_name = 'circos/links/all_links_{}.txt'.format(perc_identity)
    if group:
        groups = core_hgt_groups()
        group_hits = sorted(groups, key=len, reverse=True)[group - 1]
        out_name = 'circos/links/group{}_links_{}.txt'.format(group, perc_identity)

    with open(out_name, 'w+') as out_handle:
        for species in hits_collection.find():
            print species
            try:
                all_hits = species['core_hits_{}'.format(perc_identity)]
                hits_to_write = None
                if group:
                    hits_to_write = {gene: all_hits[gene] for gene in all_hits
                                     if (species['species'], gene) in group_hits}
                else:
                    hits_to_write = all_hits

                for gene in hits_to_write:
                    if hits_to_write[gene]:
                        s1_record = kv.get_mongo_record(species['species'], gene)
                        s1_strain = kv.parse_species_name(species['species'])
                        for hit in hits_to_write[gene]:
                            s2_record = kv.get_mongo_record(hit[0], hit[1])
                            s2_strain = kv.parse_species_name(hit[0])
                            out_handle.write('{0}kvc_{1} {2} {3} {4}kvc_{5} {6} {7}\n'.format(
                                s1_strain[2],
                                s1_record['location']['contig'],
                                s1_record['location']['start'],
                                s1_record['location']['end'],
                                s2_strain[2],
                                s2_record['location']['contig'],
                                s2_record['location']['start'],
                                s2_record['location']['end'],
                                )
                            )
            except KeyError:
                # species without hits at this threshold are skipped
                pass
def get_islands(species_name, perc_identity='99'):
    """
    For one species, combines HGT hits co-occurring within 5kb of each other

    Returns list of lists of `(species, _id)` tuples
    """
    islands = []
    species_hits_list = []

    # Add mongo_record for each gene with at least one hit
    all_hits = kv.get_collection('hits')
    species_hits = all_hits.find_one({'species': species_name})['core_hits_{}'.format(perc_identity)]
    for query_id in species_hits:
        if species_hits[query_id]:
            species_hits_list.append(
                kv.get_mongo_record(species_name, query_id)
            )

    for entry_1 in species_hits_list:
        entry_recorded = False
        for entry_2 in species_hits_list:
            if entry_1 == entry_2:
                pass
            elif entry_1['location']['contig'] != entry_2['location']['contig']:
                pass
            else:
                location_1 = entry_1['location']
                location_2 = entry_2['location']
                if abs(location_1['end'] - location_2['start']) <= 5000:
                    entry_recorded = True
                    islands.append([
                        (entry_1['species'], str(entry_1['_id'])),
                        (entry_2['species'], str(entry_2['_id']))
                    ])
        if not entry_recorded:
            islands.append([(entry_1['species'], str(entry_1['_id']))])

    return collapse_lists(islands)
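# `collapse_lists()` is relied on by get_islands() and core_hgt_groups() but
# not defined in this module. A minimal sketch of the behavior those callers
# assume: lists sharing any element are merged, and the result is a list of
# sets (which is why core_hgt_groups() can call `island.update(...)`):
def collapse_lists_sketch(list_of_lists):
    sets = [set(l) for l in list_of_lists if l]
    merged = True
    while merged:
        merged = False
        results = []
        while sets:
            current = sets.pop()
            # union in every remaining set that overlaps the current one
            overlapping = [s for s in sets if s & current]
            for s in overlapping:
                merged = True
                current |= s
                sets.remove(s)
            results.append(current)
        sets = results
    return sets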
def make_blast_db(source, name=None, remove_source=True):
    """
    Produces BLAST database from `source`
    Optional - provide name (defaults to `source`)
    Set remove_source=False to keep fasta file (if created)

    Source types:
    - fasta file (use path, must end with `.fna`)
    - Mongo collection (use name of collection)
    - list of dicts containing at least keys `species`, `_id`, `dna_seq`
    - Mongo cursor eg. `collection.find({'key':value})`
    """
    # If there's no directory for blast db's, create one
    if not os.path.isdir('blast_databases/'):
        os.makedirs('blast_databases/')

    output_fasta = None
    # guard with isinstance() so list and Cursor sources don't crash os.path.isfile()
    if isinstance(source, basestring) and os.path.isfile(source):
        # Input is a fasta file?
        if source.endswith('.fna'):
            output_fasta = source
            if not name:
                name = os.path.basename(source)[:-4]
            remove_source = False
        else:
            print "Not a valid file type, use .fna"
    else:
        output_fasta = '{0}_all.fasta'.format(kv.db.name)
        genes = None
        with open(output_fasta, 'w+') as output_handle:
            if source in kv.get_collections():
                genes = kv.get_collection(source).find()
                if not name:
                    name = source
            elif type(source) == list:
                genes = source
            elif type(source) == Cursor:
                genes = source

            for gene in genes:
                output_handle.write('>{0}|{1}\n{2}\n'.format(
                    gene['species'],
                    gene['_id'],
                    gene['dna_seq'],
                    )
                )

    while not name:
        name = str(raw_input("enter name for BLAST database: "))

    # calls makeblastdb from shell
    print "making a database!"
    Popen(
        ['makeblastdb',
        '-in', output_fasta,
        '-dbtype', 'nucl',
        '-out', 'blast_databases/{0}'.format(name),
        '-title', name,
        ]
    ).wait()  # waits for this operation to terminate before moving on

    if remove_source:
        os.remove(output_fasta)
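# Usage sketch, one call per supported source type (paths and collection
# names here are assumptions):
#
#   make_blast_db('genomes/core.fna')                 # existing fasta file
#   make_blast_db('core')                             # Mongo collection, by name
#   make_blast_db(list_of_gene_dicts, name='subset')  # list of gene documents
#   make_blast_db(kv.get_collection('core').find({'type': 'gene'}), name='core_genes')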
def import_file(some_genbank, collection):
    """
    Import records from `some_genbank` into Mongo `collection`

    * Imports each coding sequence (CDS) as a document of {'type':'gene'}
    * Imports up to one 16S rRNA sequence as a document of {'type':'16S'}
    * Each document has info on species, contig and location, DNA sequence
      and (for CDS) amino acid sequence
    * Each gene in the genbank file MUST have a `locus_tag` feature. If it
      doesn't, use `add_locus_tags()`
    * Note - `add_locus_tags()` doesn't exist yet, will be similar to
      `FixGbk.validate_gbk()`
    """
    with open(some_genbank, 'r') as open_file:
        collection = kv.get_collection(collection)

        # Each "record" in the genbank file corresponds to an individual contig
        for record in SeqIO.parse(open_file, 'gb'):
            current_contig = record.name
            try:
                current_species = record.annotations['source']
            except KeyError:
                # fall back to the file name if the source annotation is missing
                name = re.search(r'\w+\/(.+)\.\w+$', some_genbank)
                current_species = name.group(1)

            collection.insert_one({
                'species': current_species,
                'contig': current_contig,
                'dna_seq': str(record.seq),
                'type': 'contig'
            })

            print "Importing {}".format(current_contig)
            ssu_gene = get_16S(record)
            if ssu_gene:
                try:
                    locus_tag = ssu_gene[0].qualifiers['locus_tag'][0]
                except KeyError:
                    locus_tag = None

                parsed_location = kv.get_gene_location(ssu_gene[0].location)
                gene_record = {
                    'species': current_species,
                    'location': {
                        'contig': current_contig,
                        'start': parsed_location[0],
                        'end': parsed_location[1],
                        'strand': parsed_location[2],
                    },
                    'locus_tag': locus_tag,
                    'annotation': ssu_gene[0].qualifiers['product'][0],
                    'dna_seq': ssu_gene[1],
                    'type': '16S'
                }
                print "adding 16S gene!"
                collection.insert_one(gene_record)
                kv.get_collection('16S').insert_one(gene_record)

            for feature in record.features:
                if feature.type == 'CDS':
                    parsed_location = kv.get_gene_location(feature.location)
                    try:
                        locus_tag = feature.qualifiers['locus_tag'][0]
                    except KeyError:
                        locus_tag = None

                    gene_record = {
                        'species': current_species,
                        'location': {
                            'contig': current_contig,
                            'start': parsed_location[0],
                            'end': parsed_location[1],
                            'strand': parsed_location[2],
                            'index': None
                        },
                        'locus_tag': locus_tag,
                        'annotation': feature.qualifiers['product'][0],
                        'dna_seq': get_dna_seq(feature, record),
                        'aa_seq': feature.qualifiers['translation'][0],
                        'type': 'gene'
                    }
                    collection.insert_one(gene_record)
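# Usage sketch: import every genbank file in a directory into the 'core'
# collection (the directory layout here is an assumption):
#
#   for f in os.listdir('genbank/'):
#       if f.endswith(('.gb', '.gbk')):
#           import_file(os.path.join('genbank', f), 'core')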
def output_hits_csv():
    hits = kv.get_collection('hits')
    if not os.path.isdir('hits/'):
        os.makedirs('hits/')

    df_index = [
        'parent_locus', 'parent_annotation', 'parent_seq',
        'parent_contig', 'parent_start', 'parent_end', 'parent_strand',
        'hit_tag', 'hit_annotation', 'hit_seq',
        'hit_contig', 'hit_start', 'hit_end', 'hit_strand',
    ]

    for record in hits.find():
        query_species = record['species']
        df = pd.DataFrame()
        for query_id in record['hits']:
            list_of_hits = record['hits'][query_id]
            if list_of_hits:
                query_record = kv.get_collection('core').find_one(
                    {'species': query_species, '_id': ObjectId(query_id)})
                for hit in list_of_hits:
                    hit_species = hit[0]
                    hit_id = hit[1]
                    hit_record = kv.get_collection('core').find_one(
                        {'species': hit_species, '_id': ObjectId(hit_id)})
                    # strip commas so annotations don't break the csv
                    query_annotation = query_record['annotation'].replace(',', '')
                    hit_annotation = hit_record['annotation'].replace(',', '')
                    series = pd.Series([
                        query_record['kvtag'],
                        query_annotation,
                        query_record['dna_seq'],
                        query_record['location']['contig'],
                        query_record['location']['start'],
                        query_record['location']['end'],
                        query_record['location']['strand'],
                        hit_record['kvtag'],
                        hit_annotation,
                        hit_record['dna_seq'],
                        hit_record['location']['contig'],
                        hit_record['location']['start'],
                        hit_record['location']['end'],
                        hit_record['location']['strand'],
                        ],
                        index=df_index,
                        name=hit_record['species']
                    )
                    df = df.append(series)
        df.to_csv('hits/{}_hits.csv'.format(query_species), columns=df_index)
def output_loc_hist(species_1, species_2, ax):
    s1, id_list = make_indexed_fasta(species_1)
    s2 = make_species_fasta(species_2)

    if not os.path.isfile('pairwise_blast/{}_blastdb.nhr'.format(species_2)):
        Popen(
            ['makeblastdb',
            '-in', s2,
            '-dbtype', 'nucl',
            '-out', 'pairwise_blast/{}_blastdb'.format(species_2),
            '-title', os.path.basename(species_2),
            ]
        ).wait()

    indexed_blast = blast_one(s1, 'pairwise_blast/{}_blastdb'.format(species_2))
    concatenated_subject = kv.concat_contigs(kv.get_collection(species_1))

    xys = []
    last_end = 0
    # blast_one() appears to return a flat list with four fields per hit
    for i in range(0, len(indexed_blast), 4):
        # print indexed_blast[i:i+4]
        subject = concatenated_subject[ObjectId(kv.fasta_id_parse(indexed_blast[i])[1])]
        query = kv.get_mongo_record(*kv.fasta_id_parse(indexed_blast[i + 1]))
        x1 = subject['location']['start']
        if x1 <= last_end:
            x1 = last_end + 1
        x2 = subject['location']['end']