def dnld_refseqs_for_taxid(taxid, filter_term, taxonomy, dir_cache_refseqs,
                           query='', db='nuccore'):
    """Download RefSeq sequences for a taxid, walking up the lineage until
    matching records are found, and cache the results as a FASTA file."""
    if filter_term == 'plastid':
        ft = '("chloroplast"[filter] OR "plastid"[filter])'
    else:
        ft = '("' + filter_term + '"[filter])'

    tax_terms = tuple(reversed(taxonomy.lineage_for_taxid(taxid)['names']))

    for tax_term in tax_terms:
        if tax_term is None:
            tax_term = taxonomy.scientific_name_for_taxid(taxid)
        term = '"RefSeq"[Keyword] AND "{}"[Primary Organism] AND {}'.format(
            tax_term, ft)
        term = query + term
        accs = set(accs_eutil(search_eutil(db, term)))
        if len(accs) > 0:
            plural = 'sequences'
            if len(accs) == 1:
                plural = 'sequence'
            Log.msg(
                'Found {} RefSeq {} {} for'.format(len(accs), filter_term,
                                                   plural), tax_term)
            # Random sample ###################################################
            if len(accs) > 10:
                Log.wrn('Using a random sample of ten RefSeq sequences.')
                random.seed(a=len(accs), version=2)
                # random.sample() no longer accepts sets (Python 3.11+);
                # sorting also makes the seeded sample reproducible.
                accs = set(random.sample(sorted(accs), 10))
            ###################################################################
            break
        else:
            Log.wrn(
                'No RefSeq {} sequences were found for'.format(filter_term),
                tax_term)

    cache_path = opj(
        dir_cache_refseqs,
        filter_term + '__' + tax_term.replace(' ', '_') + '.fasta')

    parsed_fasta_cache = {}
    if ope(cache_path):
        parsed_fasta_cache = read_fasta(cache_path, seq_type=SEQ_TYPE_NT,
                                        def_to_first_space=True)
        parsed_fasta_cache = seq_records_to_dict(parsed_fasta_cache)
        for acc in parsed_fasta_cache:
            if acc in accs:
                accs.remove(acc)

    if len(accs) > 0:
        parsed_fasta = dnld_ncbi_seqs(db, list(accs))
        parsed_fasta = seq_records_to_dict(parsed_fasta, prepend_acc=True)
        parsed_fasta.update(parsed_fasta_cache)
        write_fasta(parsed_fasta, cache_path)

    return cache_path
def _process_downloaded_seq_data(efetch_txt: str, db: str, rettype: str,
                                 retmode: str):
    rec_list = list()
    if rettype == 'gb' and retmode == 'xml':
        rec_list = seq_records_gb(efetch_txt)
    elif rettype == 'fasta' and retmode == 'text':
        seq_type = None
        if db == 'nuccore':
            seq_type = SEQ_TYPE_NT
        elif db == 'protein':
            seq_type = SEQ_TYPE_AA
        rec_list = read_fasta(StringIO(efetch_txt), seq_type, parse_def=True)
    return rec_list
def dnld_prot_seqs(ss, prot_acc_user, aa_prot_ncbi_file, dir_cache_prj):
    if len(prot_acc_user) != 0:
        acc_old = set()
        if ope(aa_prot_ncbi_file):
            _ = read_fasta(aa_prot_ncbi_file, SEQ_TYPE_AA)
            acc_old = set([x.definition.split('|')[0] for x in _])

        if acc_old == set(prot_acc_user):
            return prot_acc_user
        else:
            pickle_file = opj(dir_cache_prj,
                              'ncbi_prot_metadata_cache__' + ss)
            if ope(pickle_file):
                with open(pickle_file, 'rb') as f:
                    pa_info = pickle.load(f)

            print()
            Log.inf('Downloading protein sequences from NCBI:', ss)
            _ = dnld_ncbi_seqs('protein', prot_acc_user, rettype='gb',
                               retmode='xml')
            prot_acc_user_new = list()
            for rec in _:
                acc_ver = rec.accession_version
                defn = rec.definition
                organism = rec.organism

                prot_acc_user_new.append(acc_ver)

                # Rebuild the definition line as: accession|Description|Organism
                defn_new = defn.split('[' + organism + ']')[0]
                defn_new = defn_new.lower().strip()
                defn_new = defn_new.replace(' ', '_').replace('-', '_')
                defn_new = defn_new.replace(',', '')
                defn_new = defn_new[0].upper() + defn_new[1:]

                defn_new = acc_ver + '|' + defn_new + '|' + organism
                defn_new = defn_new.replace(' ', '_').replace('-', '_')

                rec.definition = defn_new

            prot_acc_user = prot_acc_user_new
            write_fasta(_, aa_prot_ncbi_file)
    else:
        if ope(aa_prot_ncbi_file):
            osremove(aa_prot_ncbi_file)

    return prot_acc_user
def filter_queries(ss, aa_queries_file, min_query_length, max_query_length,
                   max_query_identity, vsearch, prot_acc_user, overwrite,
                   logging=True):
    if logging is True:
        print()
        Log.inf('Filtering AA query sequences:', ss)
        Log.msg('min_query_length:', str(min_query_length))
        Log.msg('max_query_length:', str(max_query_length))
        Log.msg('max_query_identity:', str(max_query_identity))

    parsed_fasta_1 = filter_fasta_by_length(aa_queries_file, SEQ_TYPE_AA,
                                            min_query_length,
                                            max_query_length)

    tmp1 = aa_queries_file + '_temp1'
    tmp2 = aa_queries_file + '_temp2'

    # Back-translate the AA records to nucleotides (standard genetic code),
    # cluster the nucleotide sequences, then translate the retained records
    # back to amino acids.
    for rec in parsed_fasta_1:
        rec.seq.gc_code = 1
        rec.seq = rec.seq.untranslate()

    write_fasta(parsed_fasta_1, tmp1)
    run_cluster_fast(vsearch, max_query_identity, tmp1, tmp2)
    parsed_fasta_2 = read_fasta(tmp2, SEQ_TYPE_DNA, parse_def=True)

    prot_acc_user_new = list()
    for rec in parsed_fasta_2:
        rec.seq.gc_code = 1
        rec.seq = rec.seq.translate()
        acc = rec.accession_version
        if acc in prot_acc_user:
            prot_acc_user_new.append(acc)

    if overwrite is True:
        write_fasta(parsed_fasta_2, aa_queries_file, prepend_acc=True)

    osremove(tmp1)
    osremove(tmp2)

    return prot_acc_user_new
def run_tblastn_on_assemblies(ss, assemblies, aa_queries_file, tblastn,
                              dir_prj_assmbl_blast_results, blast_2_evalue,
                              blast_2_max_hsps, blast_2_qcov_hsp_perc,
                              blast_2_best_hit_overhang,
                              blast_2_best_hit_score_edge,
                              blast_2_max_target_seqs, threads, dir_cache_prj,
                              dir_prj_ips):
    if len(assemblies) > 0:
        print()
        Log.inf('Running BLAST on assemblies:', ss)
        if tblastn is None:
            Log.err('tblastn is not available. Cannot continue. Exiting.')
            exit(0)
    else:
        Log.wrn('There are no assemblies. Nothing to do, stopping.')
        exit(0)

    cache_file = opj(dir_cache_prj, 'blast_2_settings_cache__' + ss)

    pickled = dict()
    settings = {'blast_2_evalue': blast_2_evalue,
                'blast_2_max_hsps': blast_2_max_hsps,
                'blast_2_qcov_hsp_perc': blast_2_qcov_hsp_perc,
                'blast_2_best_hit_overhang': blast_2_best_hit_overhang,
                'blast_2_best_hit_score_edge': blast_2_best_hit_score_edge,
                'blast_2_max_target_seqs': blast_2_max_target_seqs,
                'queries': seq_records_to_dict(
                    read_fasta(aa_queries_file, SEQ_TYPE_AA))}

    Log.msg('evalue:', str(blast_2_evalue))
    Log.msg('max_hsps:', str(blast_2_max_hsps))
    Log.msg('qcov_hsp_perc:', str(blast_2_qcov_hsp_perc))
    Log.msg('best_hit_overhang:', str(blast_2_best_hit_overhang))
    Log.msg('best_hit_score_edge:', str(blast_2_best_hit_score_edge))
    Log.msg('max_target_seqs:', str(blast_2_max_target_seqs))
    print()

    for a in assemblies:
        assmbl_src = a['src']
        assmbl_name = a['name']

        if assmbl_src != 'user_fasta':
            if assmbl_name.endswith('__' + ss):
                assmbl_name = assmbl_name.replace('__' + ss, '')
            else:
                continue

        assmbl_blast_db_path = a['blast_db_path']
        assmbl_genetic_code = a['gc_id']

        ips_json_dump_path = opj(dir_prj_ips,
                                 assmbl_name + '_ann_ips__' + ss + '.json')

        _ = opj(dir_prj_assmbl_blast_results,
                assmbl_name + '__' + ss + '.tsv')

        if ope(_) and ope(cache_file):
            with open(cache_file, 'rb') as f:
                pickled = pickle.load(f)

        if ope(_) and pickled == settings:
            # Log.msg('The provided BLAST settings and query sequences did '
            #         'not change since the previous run.')
            Log.msg('BLAST results already exist:', assmbl_name)
        else:
            Log.msg('Running tblastn on: ' + assmbl_name, ss)
            if ope(ips_json_dump_path):
                osremove(ips_json_dump_path)
            run_blast(exec_file=tblastn,
                      task='tblastn',
                      threads=threads,
                      db_path=assmbl_blast_db_path,
                      queries_file=aa_queries_file,
                      out_file=_,
                      evalue=blast_2_evalue,
                      max_hsps=blast_2_max_hsps,
                      qcov_hsp_perc=blast_2_qcov_hsp_perc,
                      best_hit_overhang=blast_2_best_hit_overhang,
                      best_hit_score_edge=blast_2_best_hit_score_edge,
                      max_target_seqs=blast_2_max_target_seqs,
                      db_genetic_code=assmbl_genetic_code,
                      out_cols=BLST_RES_COLS_2)

        a['blast_hits_aa__' + ss] = parse_blast_results_file(
            _, BLST_RES_COLS_2)

    with open(cache_file, 'wb') as f:
        pickle.dump(settings, f, protocol=PICKLE_PROTOCOL)
def main():
    """Run the script."""
    # Prepare initial logger (before we know the log file path) --------------
    prj_log_file_suffix = time_stamp() + '.log'
    log_stream = StringIO()

    Log.set_colors(COLORS)
    Log.set_file(log_stream)
    Log.set_write(True)

    # Prepare configuration directory ----------------------------------------
    if ope(DIR_CFG):
        Log.inf('Found configuration directory:', DIR_CFG)
    else:
        Log.wrn('Creating configuration directory:', DIR_CFG)
        make_dirs(DIR_CFG)

    print()

    # Check for dependencies --------------------------------------------------
    Log.inf('Checking for dependencies.')
    make_dirs(DIR_DEP)
    make_dirs(DIR_KRK)
    seqtk = deps.dep_check_seqtk(DIR_DEP, FORCE_DEPS)
    trimmomatic, adapters = deps.dep_check_trimmomatic(DIR_DEP)
    fasterq_dump = deps.dep_check_sra_toolkit(DIR_DEP, OS_ID, DIST_ID,
                                              DEBIAN_DISTS, REDHAT_DISTS,
                                              FORCE_DEPS)
    makeblastdb, _, tblastn = deps.dep_check_blast(DIR_DEP, OS_ID, DIST_ID,
                                                   DEBIAN_DISTS, REDHAT_DISTS,
                                                   FORCE_DEPS)
    vsearch = deps.dep_check_vsearch(DIR_DEP, OS_ID, DIST_ID, DEBIAN_DISTS,
                                     REDHAT_DISTS, FORCE_DEPS)
    spades = deps.dep_check_spades(DIR_DEP, OS_ID, FORCE_DEPS)
    bowtie2, bowtie2_build = deps.dep_check_bowtie2(DIR_DEP, OS_ID, FORCE_DEPS)
    rcorrector = deps.dep_check_rcorrector(DIR_DEP, FORCE_DEPS)
    kraken2, kraken2_build = deps.dep_check_kraken2(DIR_DEP, OS_ID,
                                                    RELEASE_NAME, FORCE_DEPS)
    print()
    kraken2_dbs = deps.dnld_kraken2_dbs(DIR_KRK)

    if INSTALL_DEPS is True or DNLD_KRAKEN_DBS is True:
        exit(0)

    print()

    # Initialize NCBI taxonomy database ---------------------------------------
    tax = Taxonomy()
    if tax.is_initialized() is False:
        tax.init(data_dir_path=DIR_TAX, logger=Log)
        print()

    # Parse configuration file ------------------------------------------------
    Log.inf('Reading configuration file:', CONFIG_FILE_PATH)
    _ = config_file_parse(CONFIG_FILE_PATH, tax)

    allow_no_stop_cod = _['allow_no_stop_cod']
    allow_no_strt_cod = _['allow_no_strt_cod']
    allow_non_aug = _['allow_non_aug']

    blast_1_evalue = _['blast_1_evalue']
    blast_1_max_hsps = _['blast_1_max_hsps']
    blast_1_qcov_hsp_perc = _['blast_1_qcov_hsp_perc']
    blast_1_best_hit_overhang = _['blast_1_best_hit_overhang']
    blast_1_best_hit_score_edge = _['blast_1_best_hit_score_edge']
    blast_1_max_target_seqs = _['blast_1_max_target_seqs']

    blast_2_evalue = _['blast_2_evalue']
    blast_2_max_hsps = _['blast_2_max_hsps']
    blast_2_qcov_hsp_perc = _['blast_2_qcov_hsp_perc']
    blast_2_best_hit_overhang = _['blast_2_best_hit_overhang']
    blast_2_best_hit_score_edge = _['blast_2_best_hit_score_edge']
    blast_2_max_target_seqs = _['blast_2_max_target_seqs']

    dir_out = _['output_directory']
    email = _['email']
    requery_after = _['requery_after']
    fq_pe = _['fq_pe']
    fq_se = _['fq_se']
    should_run_rcorrector = _['should_run_rcorrector']
    should_run_ipr = _['should_run_ipr']
    bt2_order = _['bt2_order']
    kraken_confidence = _['kraken_confidence']
    krkn_order = _['krkn_order']
    prepend_assmbl = _['prepend_assmbl']
    prj_name = _['project_name']
    sras = _['sras']
    tax_group = _['tax_group']
    # tax_group_name = _['tax_group_name']
    tax_ids_user = _['tax_ids']
    user_assemblies = _['assmbl']

    print()

    # Parse search strategies file --------------------------------------------
    if SS_FILE_PATH is not None:
        Log.inf('Reading search strategies file:', SS_FILE_PATH)
        sss = ss_file_parse(SS_FILE_PATH)
    else:
        Log.wrn('Search strategies file was not provided.\n' +
                'Will process reads, assemblies and then stop.')
        sss = dict()

    print()

    # Create output directory -------------------------------------------------
    if dir_out is not None:
        if ope(dir_out):
            Log.inf('Found output directory:', dir_out)
        else:
            Log.wrn('Creating output directory:', dir_out)
            make_dirs(dir_out)

    print()

    # Write Kakapo version information to the output directory ----------------
    version_file = opj(dir_out, 'kakapo_version.txt')
    if ope(version_file):
        with open(version_file, 'r') as f:
            version_prev = f.read().strip()
        if __version__ != version_prev:
            Log.wrn('The output directory contains data produced by a ' +
                    'different version of Kakapo: ' + version_prev +
                    '.\nThe currently running version is: ' + __version__ +
                    '.\n' +
                    'Delete "kakapo_version.txt" file located in the ' +
                    'output directory if you would like to continue.')
            exit(0)

    with open(version_file, 'w') as f:
        f.write(__version__)

    # Create subdirectories in the output directory ---------------------------
    _ = prepare_output_directories(dir_out, prj_name)

    dir_temp = _['dir_temp']
    dir_cache_pfam_acc = _['dir_cache_pfam_acc']
    dir_cache_fq_minlen = _['dir_cache_fq_minlen']
    dir_cache_prj = _['dir_cache_prj']
    dir_cache_refseqs = _['dir_cache_refseqs']
    dir_prj_logs = _['dir_prj_logs']
    dir_prj_queries = _['dir_prj_queries']
    dir_fq_data = _['dir_fq_data']
    dir_fq_cor_data = _['dir_fq_cor_data']
    dir_fq_trim_data = _['dir_fq_trim_data']
    dir_fq_filter_bt2_data = _['dir_fq_filter_bt2_data']
    dir_fq_filter_krkn2_data = _['dir_fq_filter_krkn2_data']
    dir_fa_trim_data = _['dir_fa_trim_data']
    dir_blast_fa_trim = _['dir_blast_fa_trim']
    dir_prj_blast_results_fa_trim = _['dir_prj_blast_results_fa_trim']
    dir_prj_vsearch_results_fa_trim = _['dir_prj_vsearch_results_fa_trim']
    dir_prj_spades_assemblies = _['dir_prj_spades_assemblies']
    dir_prj_blast_assmbl = _['dir_prj_blast_assmbl']
    dir_prj_assmbl_blast_results = _['dir_prj_assmbl_blast_results']
    dir_prj_transcripts = _['dir_prj_transcripts']
    dir_prj_ips = _['dir_prj_ips']
    dir_prj_transcripts_combined = _['dir_prj_transcripts_combined']

    # Prepare logger -----------------------------------------------------------
    prj_log_file = opj(dir_prj_logs, prj_name + '_' + prj_log_file_suffix)
    with open(prj_log_file, 'w') as f:
        f.write(SCRIPT_INFO.strip() + '\n\n' + log_stream.getvalue())

    Log.set_colors(COLORS)
    Log.set_file(prj_log_file)
    Log.set_write(True)

    log_stream.close()

    # Resolve descending taxonomy nodes ----------------------------------------
    tax_ids = tax.all_descending_taxids_for_taxids([tax_group])

    # Pfam uniprot accessions ---------------------------------------------------
    pfam_uniprot_acc = OrderedDict()
    for ss in sss:
        pfam_acc = sss[ss]['pfam_families']
        pfam_uniprot_acc[ss] = pfam_uniprot_accessions(ss, pfam_acc, tax_ids,
                                                       dir_cache_pfam_acc)

    # Download Pfam uniprot sequences if needed ---------------------------------
    aa_uniprot_files = OrderedDict()
    for ss in sss:
        aa_uniprot_files[ss] = opj(dir_prj_queries,
                                   'aa_uniprot__' + ss + '.fasta')
        # ToDo: add support for the requery_after parameter.
        dnld_pfam_uniprot_seqs(ss, pfam_uniprot_acc[ss], aa_uniprot_files[ss],
                               dir_cache_prj)

    # User provided entrez query ------------------------------------------------
    prot_acc_user_from_query = OrderedDict()
    for ss in sss:
        entrez_queries = sss[ss]['entrez_search_queries']
        prot_acc_user_from_query[ss] = user_entrez_search(
            ss, entrez_queries, dir_cache_prj, requery_after)

    # User provided protein accessions --------------------------------------------
    prot_acc_user = OrderedDict()
    for ss in sss:
        print()
        prot_acc_all = sorted(set(sss[ss]['ncbi_accessions_aa'] +
                                  prot_acc_user_from_query[ss]))
        prot_acc_user[ss] = user_protein_accessions(ss, prot_acc_all,
                                                    dir_cache_prj, tax)

    # Download from NCBI if needed --------------------------------------------------
    aa_prot_ncbi_files = OrderedDict()
    for ss in sss:
        aa_prot_ncbi_files[ss] = opj(dir_prj_queries,
                                     'aa_prot_ncbi__' + ss + '.fasta')
        prot_acc_user[ss] = dnld_prot_seqs(ss, prot_acc_user[ss],
                                           aa_prot_ncbi_files[ss],
                                           dir_cache_prj)

    # User provided protein sequences -------------------------------------------------
    aa_prot_user_files = OrderedDict()
    for ss in sss:
        user_queries = sss[ss]['fasta_files_aa']
        aa_prot_user_files[ss] = opj(dir_prj_queries,
                                     'aa_prot_user__' + ss + '.fasta')
        user_aa_fasta(ss, user_queries, aa_prot_user_files[ss])

    # Combine all AA queries --------------------------------------------------------------
    print()
    aa_queries_files = OrderedDict()
    for ss in sss:
        aa_queries_files[ss] = opj(dir_prj_queries, 'aa_all__' + ss + '.fasta')
        combine_aa_fasta(ss, [aa_uniprot_files[ss],
                              aa_prot_ncbi_files[ss],
                              aa_prot_user_files[ss]],
                         aa_queries_files[ss])

    # Filter AA queries --------------------------------------------------------------------
    prot_acc_user_filtered = OrderedDict()
    for ss in sss:
        min_query_length = sss[ss]['min_query_length']
        max_query_length = sss[ss]['max_query_length']
        max_query_identity = sss[ss]['max_query_identity']

        # Dereplicate all queries
        filter_queries(ss, aa_queries_files[ss], min_query_length,
                       max_query_length, max_query_identity, vsearch,
                       prot_acc_user[ss], overwrite=True)

        # Dereplicate only NCBI queries. CDS for these will be downloaded
        # later for reference.
        if ope(aa_prot_ncbi_files[ss]):
            prot_acc_user_filtered[ss] = filter_queries(
                ss, aa_prot_ncbi_files[ss], min_query_length,
                max_query_length, max_query_identity, vsearch,
                prot_acc_user[ss], overwrite=False, logging=False)

    # Download SRA run metadata if needed ----------------------------------------------------
    sra_runs_info, sras_acceptable = dnld_sra_info(sras, dir_cache_prj)

    # Download SRA run FASTQ files if needed -------------------------------------------------
    x, y, z = dnld_sra_fastq_files(sras_acceptable, sra_runs_info, dir_fq_data,
                                   fasterq_dump, THREADS, dir_temp)
    se_fastq_files_sra = x
    pe_fastq_files_sra = y
    sra_runs_info = z

    # User provided FASTQ files ---------------------------------------------------------------
    se_fastq_files_usr, pe_fastq_files_usr = user_fastq_files(fq_se, fq_pe)

    # Collate FASTQ file info ------------------------------------------------------------------
    se_fastq_files = se_fastq_files_sra.copy()
    se_fastq_files.update(se_fastq_files_usr)
    pe_fastq_files = pe_fastq_files_sra.copy()
    pe_fastq_files.update(pe_fastq_files_usr)

    def gc_tt(k, d, tax):
        taxid = d[k]['tax_id']

        gc = tax.genetic_code_for_taxid(taxid)

        d[k]['gc_id'] = gc
        d[k]['gc_tt'] = TranslationTable(gc)

        gc_mito = None
        tt_mito = None

        gc_plastid = None
        tt_plastid = None

        if tax.is_eukaryote(taxid) is True:
            gc_mito = tax.mito_genetic_code_for_taxid(taxid)
            if gc_mito != '0':
                tt_mito = TranslationTable(gc_mito)

            if tax.contains_plastid(taxid) is True:
                gc_plastid = tax.plastid_genetic_code_for_taxid(taxid)
                if gc_plastid != '0':
                    tt_plastid = TranslationTable(gc_plastid)

        d[k]['gc_id_mito'] = gc_mito
        d[k]['gc_tt_mito'] = tt_mito
        d[k]['gc_id_plastid'] = gc_plastid
        d[k]['gc_tt_plastid'] = tt_plastid

    for se in se_fastq_files:
        gc_tt(se, se_fastq_files, tax)

    for pe in pe_fastq_files:
        gc_tt(pe, pe_fastq_files, tax)

    # Minimum acceptable read length ------------------------------------------------------------
    min_accept_read_len(se_fastq_files, pe_fastq_files, dir_temp,
                        dir_cache_fq_minlen, vsearch)

    # Run Rcorrector ----------------------------------------------------------------------------
    run_rcorrector(se_fastq_files, pe_fastq_files, dir_fq_cor_data, rcorrector,
                   THREADS, dir_temp, should_run_rcorrector)

    # File name patterns ------------------------------------------------------------------------
    a, b, c, d, e = file_name_patterns()

    pe_trim_fq_file_patterns = a
    pe_trim_fa_file_patterns = b
    pe_blast_db_file_patterns = c
    pe_blast_results_file_patterns = d
    pe_vsearch_results_file_patterns = e

    # Run Trimmomatic ---------------------------------------------------------------------------
    run_trimmomatic(se_fastq_files, pe_fastq_files, dir_fq_trim_data,
                    trimmomatic, adapters, pe_trim_fq_file_patterns, THREADS)

    # Run Bowtie 2 ------------------------------------------------------------------------------
    run_bt2_fq(se_fastq_files, pe_fastq_files, dir_fq_filter_bt2_data,
               bowtie2, bowtie2_build, THREADS, dir_temp, bt2_order,
               pe_trim_fq_file_patterns, tax, dir_cache_refseqs)

    # Run Kraken2 -------------------------------------------------------------------------------
    run_kraken2(krkn_order, kraken2_dbs, se_fastq_files, pe_fastq_files,
                dir_fq_filter_krkn2_data, kraken_confidence, kraken2, THREADS,
                dir_temp, pe_trim_fq_file_patterns)

    se_fastq_files = OrderedDict(se_fastq_files)
    pe_fastq_files = OrderedDict(pe_fastq_files)

    se_fastq_files = OrderedDict(
        sorted(se_fastq_files.items(), key=lambda x: x[1]['filter_path_fq']))
    pe_fastq_files = OrderedDict(
        sorted(pe_fastq_files.items(), key=lambda x: x[1]['filter_path_fq']))

    # Stop After Filter -------------------------------------------------------------------------
    if STOP_AFTER_FILTER is True:
        Log.wrn('Stopping after Kraken2/Bowtie2 filtering step as requested.')
        exit(0)

    # Convert filtered FASTQ files to FASTA -----------------------------------------------------
    filtered_fq_to_fa(se_fastq_files, pe_fastq_files, dir_fa_trim_data, seqtk,
                      pe_trim_fa_file_patterns)

    # Run makeblastdb on reads ------------------------------------------------------------------
    makeblastdb_fq(se_fastq_files, pe_fastq_files, dir_blast_fa_trim,
                   makeblastdb, pe_blast_db_file_patterns)

    # Check if there are any query sequences.
    any_queries = False
    for ss in sss:
        if stat(aa_queries_files[ss]).st_size == 0:
            continue
        else:
            any_queries = True

    # Run tblastn on reads ----------------------------------------------------------------------
    for ss in sss:
        if stat(aa_queries_files[ss]).st_size == 0:
            continue
        changed_blast_1 = run_tblastn_on_reads(
            se_fastq_files, pe_fastq_files, aa_queries_files[ss], tblastn,
            blast_1_evalue, blast_1_max_hsps, blast_1_qcov_hsp_perc,
            blast_1_best_hit_overhang, blast_1_best_hit_score_edge,
            blast_1_max_target_seqs, dir_prj_blast_results_fa_trim,
            pe_blast_results_file_patterns, ss, THREADS, seqtk, vsearch,
            dir_cache_prj)

        if changed_blast_1 is True:
            if ope(dir_prj_vsearch_results_fa_trim):
                rmtree(dir_prj_vsearch_results_fa_trim)
            if ope(dir_prj_spades_assemblies):
                rmtree(dir_prj_spades_assemblies)
            if ope(dir_prj_blast_assmbl):
                rmtree(dir_prj_blast_assmbl)
            if ope(dir_prj_assmbl_blast_results):
                rmtree(dir_prj_assmbl_blast_results)
            if ope(dir_prj_transcripts):
                rmtree(dir_prj_transcripts)
            if ope(dir_prj_transcripts_combined):
                rmtree(dir_prj_transcripts_combined)
            prepare_output_directories(dir_out, prj_name)

    # Run vsearch on reads ----------------------------------------------------------------------
    # should_run_vsearch = False
    # for ss in sss:
    #     if stat(aa_queries_files[ss]).st_size == 0:
    #         continue
    #     else:
    #         should_run_vsearch = True
    #         break

    # if should_run_vsearch is True:
    #     print()
    #     Log.inf('Checking if Vsearch should be run.')

    for ss in sss:
        if stat(aa_queries_files[ss]).st_size == 0:
            continue
        print()
        Log.inf('Checking if Vsearch should be run:', ss)
        run_vsearch_on_reads(se_fastq_files, pe_fastq_files, vsearch,
                             dir_prj_vsearch_results_fa_trim,
                             pe_vsearch_results_file_patterns, ss, seqtk)

    # Run SPAdes --------------------------------------------------------------------------------
    # should_run_spades = False
    # for ss in sss:
    #     if stat(aa_queries_files[ss]).st_size == 0:
    #         continue
    #     else:
    #         should_run_spades = True
    #         break

    # if should_run_spades is True:
    #     print()
    #     Log.inf('Checking if SPAdes should be run.')

    for ss in sss:
        if stat(aa_queries_files[ss]).st_size == 0:
            for se in se_fastq_files:
                se_fastq_files[se]['spades_assembly' + '__' + ss] = None
            for pe in pe_fastq_files:
                pe_fastq_files[pe]['spades_assembly' + '__' + ss] = None
            continue
        print()
        Log.inf('Checking if SPAdes should be run:', ss)
        run_spades(se_fastq_files, pe_fastq_files, dir_prj_spades_assemblies,
                   spades, dir_temp, ss, THREADS, RAM)

    # Combine SPAdes and user provided assemblies -----------------------------------------------
    assemblies = combine_assemblies(se_fastq_files, pe_fastq_files,
                                    user_assemblies, tax, sss)

    # Run makeblastdb on assemblies -------------------------------------------------------------
    makeblastdb_assemblies(assemblies, dir_prj_blast_assmbl, makeblastdb)

    if any_queries is False:
        Log.wrn('No query sequences were provided.')

    # Run tblastn on assemblies -----------------------------------------------------------------
    for ss in sss:
        if stat(aa_queries_files[ss]).st_size == 0:
            continue

        should_run_tblastn = False
        for a in assemblies:
            assmbl_src = a['src']
            assmbl_name = a['name']
            if assmbl_src != 'user_fasta':
                if assmbl_name.endswith('__' + ss):
                    should_run_tblastn = True
                    break
            else:
                should_run_tblastn = True
                break

        if should_run_tblastn is False:
            print()
            Log.inf('Will not run BLAST. No transcripts exist:', ss)
            continue

        blast_2_evalue_ss = sss[ss]['blast_2_evalue']
        blast_2_max_hsps_ss = sss[ss]['blast_2_max_hsps']
        blast_2_qcov_hsp_perc_ss = sss[ss]['blast_2_qcov_hsp_perc']
        blast_2_best_hit_overhang_ss = sss[ss]['blast_2_best_hit_overhang']
        blast_2_best_hit_score_edge_ss = sss[ss]['blast_2_best_hit_score_edge']
        blast_2_max_target_seqs_ss = sss[ss]['blast_2_max_target_seqs']

        if blast_2_evalue_ss is None:
            blast_2_evalue_ss = blast_2_evalue
        if blast_2_max_hsps_ss is None:
            blast_2_max_hsps_ss = blast_2_max_hsps
        if blast_2_qcov_hsp_perc_ss is None:
            blast_2_qcov_hsp_perc_ss = blast_2_qcov_hsp_perc
        if blast_2_best_hit_overhang_ss is None:
            blast_2_best_hit_overhang_ss = blast_2_best_hit_overhang
        if blast_2_best_hit_score_edge_ss is None:
            blast_2_best_hit_score_edge_ss = blast_2_best_hit_score_edge
        if blast_2_max_target_seqs_ss is None:
            blast_2_max_target_seqs_ss = blast_2_max_target_seqs

        run_tblastn_on_assemblies(
            ss, assemblies, aa_queries_files[ss], tblastn,
            dir_prj_assmbl_blast_results, blast_2_evalue_ss,
            blast_2_max_hsps_ss, blast_2_qcov_hsp_perc_ss,
            blast_2_best_hit_overhang_ss, blast_2_best_hit_score_edge_ss,
            blast_2_max_target_seqs_ss, THREADS, dir_cache_prj, dir_prj_ips)

    # Prepare BLAST hits for analysis: find ORFs, translate -------------------------------------
    for ss in sss:
        if stat(aa_queries_files[ss]).st_size == 0:
            continue

        min_target_orf_len_ss = sss[ss]['min_target_orf_length']
        max_target_orf_len_ss = sss[ss]['max_target_orf_length']
        organelle = sss[ss]['organelle']

        blast_2_qcov_hsp_perc_ss = sss[ss]['blast_2_qcov_hsp_perc']

        if blast_2_qcov_hsp_perc_ss is None:
            blast_2_qcov_hsp_perc_ss = blast_2_qcov_hsp_perc

        find_orfs_translate(ss, assemblies, dir_prj_transcripts, seqtk,
                            dir_temp, prepend_assmbl, min_target_orf_len_ss,
                            max_target_orf_len_ss, allow_non_aug,
                            allow_no_strt_cod, allow_no_stop_cod, tax,
                            tax_group, tax_ids_user, blast_2_qcov_hsp_perc_ss,
                            organelle)

    # GFF3 files from kakapo results JSON files --------------------------------------------------
    # print()
    for ss in sss:
        if stat(aa_queries_files[ss]).st_size == 0:
            continue
        gff_from_json(ss, assemblies, dir_prj_ips,
                      dir_prj_transcripts_combined, prj_name)

    # Run InterProScan 5 --------------------------------------------------------------------------
    if should_run_ipr is True:
        print()
        ss_names = tuple(sss.keys())

        # Determine the length of printed strings, for better spacing --------
        max_title_a_len = 0
        max_run_id_len = 0
        for a in assemblies:
            for ss in ss_names:
                if 'transcripts_aa_orf_fasta_file__' + ss not in a:
                    continue

                aa_file = a['transcripts_aa_orf_fasta_file__' + ss]

                if aa_file is None:
                    continue

                assmbl_name = a['name']
                run_id = ss + '_' + assmbl_name
                max_run_id_len = max(len(run_id), max_run_id_len)

                seqs = seq_records_to_dict(read_fasta(aa_file, SEQ_TYPE_AA))

                # Filter all ORFs except the first one.
                for seq_def in tuple(seqs.keys()):
                    seq_def_prefix = seq_def.split(' ')[0]
                    if seq_def_prefix.endswith('ORF001'):
                        max_title_a_len = max(len(seq_def_prefix),
                                              max_title_a_len)

        max_title_a_len += 2
        max_run_id_len += 2
        # ---------------------------------------------------------------------

        parallel_run_count = min(THREADS, len(ss_names))

        def run_inter_pro_scan_parallel(ss):
            if stat(aa_queries_files[ss]).st_size == 0:
                return

            run_inter_pro_scan(ss, assemblies, email, dir_prj_ips,
                               dir_cache_prj, parallel_run_count,
                               max_title_a_len, max_run_id_len)

            # GFF3 files from kakapo and InterProScan 5 results JSON files
            gff_from_json(ss, assemblies, dir_prj_ips,
                          dir_prj_transcripts_combined, prj_name)

        Parallel(n_jobs=parallel_run_count, verbose=0, require='sharedmem')(
            delayed(run_inter_pro_scan_parallel)(ss) for ss in ss_names)

    # Download CDS for NCBI protein queries -------------------------------------------------------
    print()
    prot_cds_ncbi_files = OrderedDict()

    def dnld_cds_for_ncbi_prot_acc_parallel(ss):
        if stat(aa_queries_files[ss]).st_size == 0:
            return

        if ss not in prot_acc_user_filtered:
            return

        prot_cds_ncbi_files[ss] = opj(
            dir_prj_transcripts_combined,
            prj_name + '_ncbi_query_cds__' + ss + '.fasta')

        if len(prot_acc_user_filtered[ss]) > 0:
            dnld_cds_for_ncbi_prot_acc(ss, prot_acc_user_filtered[ss],
                                       prot_cds_ncbi_files[ss], tax,
                                       dir_cache_prj)

    ss_names = tuple(sss.keys())
    Parallel(n_jobs=2, verbose=0, require='sharedmem')(
        delayed(dnld_cds_for_ncbi_prot_acc_parallel)(ss) for ss in ss_names)

    # ----------------------------------------------------------------------------------------------
    rmtree(dir_temp)

    # ----------------------------------------------------------------------------------------------
    rerun = input('\nRepeat ([y]/n)? ').lower().strip()
    if rerun.startswith('y') or rerun == '':
        print()
        return False
    else:
        print('\nExiting...')
        return True
def run_spades(se_fastq_files, pe_fastq_files, dir_spades_assemblies, spades,
               dir_temp, ss, threads, ram):
    if len(se_fastq_files) > 0 or len(pe_fastq_files) > 0:
        if spades is None:
            Log.err('SPAdes is not available. Cannot continue. Exiting.')
            exit(0)

    for se in se_fastq_files:
        dir_results = opj(dir_spades_assemblies, se + '__' + ss)
        fq_path = se_fastq_files[se]['vsearch_results_path' + '__' + ss]
        se_fastq_files[se]['spades_assembly' + '__' + ss] = None

        if ope(dir_results):
            Log.msg('SPAdes assembly already exists:', se)
        else:
            make_dirs(dir_results)
            Log.msg('Running SPAdes on:', se)
            run_spades_se(spades, out_dir=dir_results, input_file=fq_path,
                          threads=threads, memory=ram, rna=True)

        assmbl_path = opj(dir_results, 'transcripts.fasta')
        if ope(assmbl_path):
            count = len(read_fasta(assmbl_path, SEQ_TYPE_NT))
            tr_str = ' transcripts.'
            if count == 1:
                tr_str = ' transcript.'
            Log.msg('SPAdes produced ' + str(count) + tr_str, False)
            se_fastq_files[se]['spades_assembly' + '__' + ss] = assmbl_path
        else:
            Log.wrn('SPAdes produced no transcripts.', False)

    for pe in pe_fastq_files:
        dir_results = opj(dir_spades_assemblies, pe + '__' + ss)
        fq_paths = pe_fastq_files[pe]['vsearch_results_path' + '__' + ss]
        pe_fastq_files[pe]['spades_assembly' + '__' + ss] = None

        if ope(dir_results):
            Log.msg('SPAdes assembly already exists:', pe)
        else:
            make_dirs(dir_results)
            Log.msg('Running SPAdes on: ' + pe)

            if osstat(fq_paths[0]).st_size > 0 and \
               osstat(fq_paths[1]).st_size > 0:
                run_spades_pe(spades, out_dir=dir_results,
                              input_files=fq_paths, threads=threads,
                              memory=ram, rna=True)
            else:
                _ = opj(dir_temp, 'temp.fasta')
                combine_text_files(fq_paths, _)
                run_spades_se(spades, out_dir=dir_results, input_file=_,
                              threads=threads, memory=ram, rna=True)
                osremove(_)

        assmbl_path = opj(dir_results, 'transcripts.fasta')
        if ope(assmbl_path):
            count = len(read_fasta(assmbl_path, SEQ_TYPE_NT))
            tr_str = ' transcripts.'
            if count == 1:
                tr_str = ' transcript.'
            Log.msg('SPAdes produced ' + str(count) + tr_str, False)
            pe_fastq_files[pe]['spades_assembly' + '__' + ss] = assmbl_path
        else:
            Log.wrn('SPAdes produced no transcripts.', False)
def run_inter_pro_scan(ss, assemblies, email, dir_prj_ips, dir_cache_prj,
                       parallel_run_count, max_title_a_len, max_run_id_len):
    delay = 0.25

    for a in assemblies:
        if 'transcripts_aa_orf_fasta_file__' + ss not in a:
            continue

        aa_file = a['transcripts_aa_orf_fasta_file__' + ss]

        if aa_file is None:
            continue

        assmbl_name = a['name']

        json_dump_file_path = opj(dir_prj_ips,
                                  assmbl_name + '_ann_ips__' + ss + '.json')

        if ope(json_dump_file_path):
            Log.inf('InterProScan results for assembly ' + assmbl_name +
                    ', search strategy ' + ss +
                    ' have already been downloaded.')
            continue
        else:
            Log.inf('Running InterProScan on translated ' + ss + ' from ' +
                    assmbl_name + '.')

        seqs = seq_records_to_dict(read_fasta(aa_file, SEQ_TYPE_AA))

        # Filter all ORFs except the first one.
        for seq_def in tuple(seqs.keys()):
            seq_def_prefix = seq_def.split(' ')[0]
            if not seq_def_prefix.endswith('ORF001'):
                del seqs[seq_def]

        seqs = OrderedDict(sorted(seqs.items(),
                                  key=lambda x: x[0].split(' ')[1],
                                  reverse=True))

        run_id = ss + '_' + assmbl_name

        _ = opj(dir_cache_prj, 'ips5_cache_done_' + run_id)

        if ope(_):
            with open(_, 'rb') as f:
                jobs = pickle.load(f)
        else:
            jobs = job_runner(email=email, dir_cache=dir_cache_prj,
                              seqs=seqs, run_id=run_id,
                              parallel_run_count=parallel_run_count,
                              max_title_a_len=max_title_a_len,
                              max_run_id_len=max_run_id_len)

            with open(_, 'wb') as f:
                pickle.dump(jobs, f, protocol=PICKLE_PROTOCOL)

        Log.inf('Downloading InterProScan results for ' + ss + ' in ' +
                assmbl_name + '.')

        all_ips_results = {}

        # Nicer printing
        for i, job in enumerate(jobs['finished']):
            job_id = jobs['finished'][job]
            titles_ab = split_seq_defn(job)
            title_a = titles_ab[0]

            progress = round(((i + 1) / len(jobs['finished'])) * 100)
            progress_str = '{:3d}'.format(progress) + '%'

            msg = (' ' * 12 +
                   title_a.ljust(max_title_a_len) +
                   run_id.ljust(max_run_id_len) +
                   progress_str.rjust(4) + ' ' +
                   job_id)

            Log.msg(msg)
            sleep(delay)

            ips_json = result_json(job_id)
            if ips_json is None:
                continue
            # ips_version = ips_json['interproscan-version']
            ips_json = ips_json['results']

            # These fields are set to 'EMBOSS_001' by default. Delete them.
            del ips_json[0]['xref']

            job_no_def = job.split(' ')[0]

            all_ips_results[job_no_def] = ips_json

        with open(json_dump_file_path, 'w') as f:
            json.dump(all_ips_results, f, sort_keys=True, indent=4)

        # Remove the cached jobs file.
        osremove(_)
def run_tblastn_on_reads(se_fastq_files, pe_fastq_files, aa_queries_file,
                         tblastn, blast_1_evalue, blast_1_max_hsps,
                         blast_1_qcov_hsp_perc, blast_1_best_hit_overhang,
                         blast_1_best_hit_score_edge, blast_1_max_target_seqs,
                         dir_blast_results_fa_trim, fpatt, ss, threads, seqtk,
                         vsearch, dir_cache_prj):
    changed_blast_1 = False

    if len(se_fastq_files) > 0 or len(pe_fastq_files) > 0:
        print()
        Log.inf('Running BLAST on reads:', ss)
        if tblastn is None:
            Log.err('tblastn is not available. Cannot continue. Exiting.')
            exit(0)
        if vsearch is None:
            Log.err('vsearch is not available. Cannot continue. Exiting.')
            exit(0)
        if seqtk is None:
            Log.err('seqtk is not available. Cannot continue. Exiting.')
            exit(0)

    cache_file = opj(dir_cache_prj, 'blast_1_settings_cache__' + ss)

    pickled = dict()
    settings = {'blast_1_evalue': blast_1_evalue,
                'blast_1_max_hsps': blast_1_max_hsps,
                'blast_1_qcov_hsp_perc': blast_1_qcov_hsp_perc,
                'blast_1_best_hit_overhang': blast_1_best_hit_overhang,
                'blast_1_best_hit_score_edge': blast_1_best_hit_score_edge,
                'blast_1_max_target_seqs': blast_1_max_target_seqs,
                'queries': seq_records_to_dict(
                    read_fasta(aa_queries_file, SEQ_TYPE_AA))}

    Log.msg('evalue:', str(blast_1_evalue))
    Log.msg('max_hsps:', str(blast_1_max_hsps))
    Log.msg('qcov_hsp_perc:', str(blast_1_qcov_hsp_perc))
    Log.msg('best_hit_overhang:', str(blast_1_best_hit_overhang))
    Log.msg('best_hit_score_edge:', str(blast_1_best_hit_score_edge))
    Log.msg('max_target_seqs:', str(blast_1_max_target_seqs))
    print()

    # FixMe: Expose in configuration files?
    ident = 0.85

    for se in se_fastq_files:
        dir_results = opj(dir_blast_results_fa_trim, se)
        blast_db_path = se_fastq_files[se]['blast_db_path']
        fq_path = se_fastq_files[se]['filter_path_fq']
        out_f = opj(dir_results, se + '__' + ss + '.txt')
        out_f_fastq = out_f.replace('.txt', '.fastq')
        out_f_fasta = out_f.replace('.txt', '.fasta')
        se_fastq_files[se]['blast_results_path' + '__' + ss] = out_f_fasta
        genetic_code = se_fastq_files[se]['gc_id']

        if ope(out_f_fasta) and ope(cache_file):
            with open(cache_file, 'rb') as f:
                pickled = pickle.load(f)

        if ope(out_f_fasta) and pickled == settings:
            # Log.msg('The provided BLAST settings and query sequences did '
            #         'not change since the previous run.')
            Log.msg('BLAST results already exist:', se)
        else:
            changed_blast_1 = True
            make_dirs(dir_results)
            Log.msg('Running tblastn on: ' + basename(blast_db_path), ss)
            run_blast(exec_file=tblastn,
                      task='tblastn',
                      threads=threads,
                      db_path=blast_db_path,
                      queries_file=aa_queries_file,
                      out_file=out_f,
                      evalue=blast_1_evalue,
                      max_hsps=blast_1_max_hsps,
                      qcov_hsp_perc=blast_1_qcov_hsp_perc,
                      best_hit_overhang=blast_1_best_hit_overhang,
                      best_hit_score_edge=blast_1_best_hit_score_edge,
                      max_target_seqs=blast_1_max_target_seqs,
                      db_genetic_code=genetic_code,
                      out_cols=BLST_RES_COLS_1)

            Log.inf('Extracting unique BLAST hits using Seqtk:', ss)

            keep_unique_lines_in_file(out_f)

            seqtk_extract_reads(seqtk, fq_path, out_f_fastq, out_f)
            seqtk_fq_to_fa(seqtk, out_f_fastq, out_f_fasta)

            osremove(out_f)
            osremove(out_f_fastq)

            out_f_fasta_temp = out_f_fasta + '_temp'
            copyfile(out_f_fasta, out_f_fasta_temp)
            run_cluster_fast(vsearch, ident, out_f_fasta_temp, out_f_fasta)
            osremove(out_f_fasta_temp)

    for pe in pe_fastq_files:
        dir_results = opj(dir_blast_results_fa_trim, pe)
        blast_db_paths = pe_fastq_files[pe]['blast_db_path']
        fq_paths = pe_fastq_files[pe]['filter_path_fq']
        out_fs = [x.replace('@D@', dir_results) for x in fpatt]
        out_fs = [x.replace('@N@', pe) for x in out_fs]
        out_fs = [x.replace('@Q@', ss) for x in out_fs]
        out_fs_fastq = [x.replace('.txt', '.fastq') for x in out_fs]
        out_fs_fasta = [x.replace('.txt', '.fasta') for x in out_fs]
        out_f_fasta = opj(dir_results, pe + '__' + ss + '.fasta')
        pe_fastq_files[pe]['blast_results_path' + '__' + ss] = out_f_fasta
        genetic_code = pe_fastq_files[pe]['gc_id']

        if ope(out_f_fasta) and ope(cache_file):
            with open(cache_file, 'rb') as f:
                pickled = pickle.load(f)

        if ope(out_f_fasta) and pickled == settings:
            # Log.msg('The provided BLAST settings and query sequences did '
            #         'not change since the previous run.')
            Log.msg('BLAST results already exist:', pe)
        else:
            changed_blast_1 = True
            make_dirs(dir_results)
            pe_trim_files = zip(blast_db_paths, out_fs, fq_paths,
                                out_fs_fastq, out_fs_fasta)
            for x in pe_trim_files:
                Log.msg('Running tblastn on: ' + basename(x[0]), ss)
                run_blast(exec_file=tblastn,
                          task='tblastn',
                          threads=threads,
                          db_path=x[0],
                          queries_file=aa_queries_file,
                          out_file=x[1],
                          evalue=blast_1_evalue,
                          max_hsps=blast_1_max_hsps,
                          qcov_hsp_perc=blast_1_qcov_hsp_perc,
                          best_hit_overhang=blast_1_best_hit_overhang,
                          best_hit_score_edge=blast_1_best_hit_score_edge,
                          max_target_seqs=blast_1_max_target_seqs,
                          db_genetic_code=genetic_code,
                          out_cols=BLST_RES_COLS_1)

                Log.msg('Extracting unique BLAST hits using Seqtk:', ss)

                keep_unique_lines_in_file(x[1])

                seqtk_extract_reads(seqtk, x[2], x[3], x[1])
                seqtk_fq_to_fa(seqtk, x[3], x[4])

                osremove(x[1])
                osremove(x[3])

            combine_text_files(out_fs_fasta, out_f_fasta)

            out_f_fasta_temp = out_f_fasta + '_temp'
            copyfile(out_f_fasta, out_f_fasta_temp)
            run_cluster_fast(vsearch, ident, out_f_fasta_temp, out_f_fasta)
            osremove(out_f_fasta_temp)

            for x in out_fs_fasta:
                osremove(x)

    with open(cache_file, 'wb') as f:
        pickle.dump(settings, f, protocol=PICKLE_PROTOCOL)

    return changed_blast_1