def predict_genes(self, output_prefix, annotation_species_prefix, genome_fasta, augustus_species,
                  output_directory="./", augustus_strand=None, augustus_gene_model=None,
                  augustus_config_dir=None, augustus_use_softmasking=None, augustus_other_options="",
                  augustus_hintsfile=None, augustus_extrinsicCfgFile=None, augustus_predict_UTR=None,
                  augustus_min_intron_len=None, threads=1, augustus_dir="", hmmer_dir="", blast_dir="",
                  stop_codons_list=("TGA", "TAA", "TAG"), genetic_code_table=1):

    draft_file_prefix = "%s/raw/%s" % (output_directory, output_prefix)

    augustus_splited_input_dir = "%s/splited_input/" % output_directory
    augustus_splited_output_dir = "%s/splited_output_dir" % output_directory

    output_raw_gff = "%s.raw.gff" % draft_file_prefix
    output_gff = "%s.renamed.gff" % draft_file_prefix
    augustus_pep = "%s.pep" % draft_file_prefix

    # Configure tool wrappers: executable locations and thread counts
    AUGUSTUS.path = augustus_dir
    AUGUSTUS.threads = threads
    HMMER3.path = hmmer_dir
    HMMER3.threads = threads
    BLASTp.path = blast_dir
    BLASTp.threads = threads

    print("Annotating genes...")
    AUGUSTUS.parallel_predict(augustus_species, genome_fasta, output_raw_gff,
                              strand=augustus_strand,
                              gene_model=augustus_gene_model,
                              output_gff3=True,
                              other_options=augustus_other_options,
                              config_dir=augustus_config_dir,
                              use_softmasking=augustus_use_softmasking,
                              hints_file=augustus_hintsfile,
                              split_dir=augustus_splited_input_dir,
                              splited_output_dir=augustus_splited_output_dir,
                              extrinsicCfgFile=augustus_extrinsicCfgFile,
                              predict_UTR=augustus_predict_UTR,
                              combine_output_to_single_file=True,
                              min_intron_len=augustus_min_intron_len)

    # replace_augustus_ids(augustus_gff, output_prefix, species_prefix=None, number_of_digits_in_id=8)
    AUGUSTUS.replace_augustus_ids(output_raw_gff, draft_file_prefix,
                                  species_prefix=annotation_species_prefix,
                                  number_of_digits_in_id=8)

    # extract_transcript_sequences(self, input_gff_file, genomic_fasta_file, output_prefix, coding_only=False)
    gffread_file_prefix = "%s.gffread" % draft_file_prefix
    gffread_transcripts_file, gffread_cds_file, gffread_pep_file = \
        Gffread.extract_transcript_sequences(output_gff, genome_fasta, gffread_file_prefix)

    gffread_trimmed_cds = ".".join(gffread_cds_file.split(".")[:-1]) + ".trimmed.cds"
    gffread_trimmed_pep = ".".join(gffread_pep_file.split(".")[:-1]) + ".trimmed.pep"

    # Stop codons default to those of the universal genetic code; note that this affects mtDNA proteins.
    self.trim_cds_and_remove_terminal_stop_codons(gffread_cds_file,
                                                  gffread_trimmed_cds,
                                                  stop_codons_list=stop_codons_list)

    inframe_stop_codons_file_prefix = "%s.inframe_stop_codon" % draft_file_prefix
    # NOTE: genetic_code_table defaults to 1, i.e. the universal code.
    self.translate_sequences_from_file(gffread_trimmed_cds, gffread_trimmed_pep,
                                       format="fasta",
                                       id_expression=None,
                                       genetic_code_table=genetic_code_table,
                                       translate_to_stop=False,
                                       prefix_of_file_inframe_stop_codons_seqsin=inframe_stop_codons_file_prefix)
    # NOTE: output_pep, all_annotated_genes_ids, CDS_gff, the *_stats/*_domtblout/*_hits/*_ids
    # output paths, and the Pfam/Swissprot database locations (args.pfam_db, args.swissprot_db)
    # used below must be provided by surrounding code; they are not defined here.
    AUGUSTUS.extract_gene_ids_from_output(output_gff, all_annotated_genes_ids)
    AUGUSTUS.extract_CDS_annotations_from_output(output_gff, CDS_gff)

    print("Extracting peptides...")
    AUGUSTUS.extract_proteins_from_output(output_gff, output_pep,
                                          id_prefix="",
                                          evidence_stats_file=output_evidence_stats,
                                          supported_by_hints_file=output_supported_stats)

    # Compare AUGUSTUS-extracted peptides with the trimmed peptides translated above
    self.compare_sequences_from_files(output_pep, gffread_trimmed_pep, "comparison_of_peptides",
                                      format="fasta", verbose=True)

    # Skip the header line and keep the second column (ids of hint-supported transcripts)
    os.system("awk -F'\\t' 'NR==1 {}; NR > 1 {print $2}' %s > %s" % (output_supported_stats,
                                                                     output_supported_stats_ids))

    print("Annotating domains (Pfam database)...")
    HMMER3.parallel_hmmscan(args.pfam_db, output_pep, output_hmmscan,
                            num_of_seqs_per_scan=None,
                            split_dir="splited_hmmscan_fasta/",
                            splited_output_dir="splited_hmmscan_output_dir",
                            tblout_outfile=None,
                            domtblout_outfile=output_domtblout,
                            pfamtblout_outfile=None,
                            splited_tblout_dir=None,
                            splited_domtblout_dir="hmmscan_domtblout/")
    HMMER3.extract_dom_ids_hits_from_domtblout(output_domtblout, output_pfam_annotated_dom_ids)
    hits_dict = HMMER3.extract_dom_names_hits_from_domtblout(output_domtblout,
                                                             output_pfam_annotated_dom_names)
    supported_ids = IdSet(hits_dict.keys())
    supported_ids.write(output_pfam_supported_transcripts_ids)

    # Strip the ".t<N>" transcript suffix to get ids of Pfam-supported genes
    remove_transcript_ids_str = r"sed -re 's/\.t[0123456789]+//' %s | sort -k 1 | uniq > %s" % (
        output_pfam_supported_transcripts_ids, output_pfam_supported_genes_ids)
    os.system(remove_transcript_ids_str)

    print("Annotating peptides (Swissprot database)...")
    BLASTp.parallel_blastp(output_pep, args.swissprot_db,
                           evalue=0.0000001,
                           output_format=6,
                           outfile=output_swissprot_blastp_hits,
                           split_dir="splited_blastp_fasta",
                           splited_output_dir="splited_blastp_output_dir")
    hits_dict = BLASTp.extract_hits_from_tbl_output(output_swissprot_blastp_hits,
                                                    output_swissprot_blastp_hits_names)
    supported_ids = IdSet(hits_dict.keys())
    supported_ids.write(output_swissprot_supported_transcripts_ids)

    # Strip the ".t<N>" transcript suffix to get ids of Swissprot-supported genes
    remove_transcript_ids_str = r"sed -re 's/\.t[0123456789]+//' %s | sort -k 1 | uniq > %s" % (
        output_swissprot_supported_transcripts_ids, output_swissprot_supported_genes_ids)
    os.system(remove_transcript_ids_str)
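# Illustrative alternative (not part of the original pipeline) to the sed/sort/uniq shell-outs
# above: the same transcript-id -> gene-id conversion done with the re module. The helper name
# is hypothetical; it assumes ids of the form "<gene_id>.t<number>".
import re

def transcript_ids_to_gene_ids(transcript_id_file, gene_id_file):
    with open(transcript_id_file) as in_fd:
        # "GENE00000001.t2" -> "GENE00000001"; a set collapses transcripts of the same gene
        gene_ids = sorted(set(re.sub(r"\.t\d+$", "", line.strip()) for line in in_fd if line.strip()))
    with open(gene_id_file, "w") as out_fd:
        out_fd.write("\n".join(gene_ids) + "\n")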
bad_antigen_candidates_coordinates = "%s.bad_candidates.coordinates" % args.out_prefix
bad_antigen_candidates_coordinates_sorted = "%s.bad_candidates.sorted.coordinates" % args.out_prefix

BLASTp.threads = args.threads

# NOTE: kmer_file and the *_blast_hits* output paths used below must be defined earlier in the script.
sequence = list(SeqIO.parse(args.input, format="fasta"))[0]

print("Constructing kmer list...\n")
kmer_dict = get_kmer_dict_as_seq_records(sequence.seq, args.length, args.start, args.end)
kmer_ids = list(kmer_dict.keys())
SeqIO.write(record_by_expression_generator(kmer_dict), kmer_file, format="fasta")

print("BLAST of kmers vs species peptides\n")
BLASTp.search(kmer_file, args.species_db,
              outfile=species_blast_hits,
              blast_options=None,
              evalue=args.species_evalue,
              output_format=6)

# -E enables extended regular expressions so that "|" acts as alternation when excluding
# self-hits against the source protein ids
species_grep_string = "grep -vE '%s' %s > %s" % ("|".join(args.protein_ids),
                                                 species_blast_hits,
                                                 species_blast_hits_no_self_hits)
species_awk_string = "awk '{print $1}' %s | uniq > %s" % (species_blast_hits_no_self_hits,
                                                          species_blast_hits_no_self_hits_ids)
os.system(species_grep_string)
os.system(species_awk_string)

print("BLAST of kmers vs immunogenic peptides\n")
BLASTp.search(kmer_file, args.immune_db,
              outfile=immune_blast_hits,
              blast_options=None,
              evalue=args.immune_evalue,
              output_format=6)
immune_awk_string = "awk '{print $1}' %s | uniq > %s" % (immune_blast_hits, immune_blast_hits_ids)
os.system(immune_awk_string)
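# A minimal sketch of what get_kmer_dict_as_seq_records is assumed to do, inferred from its
# usage above (the real helper lives elsewhere in the repository and may differ): slide a
# window of fixed length over the peptide between 1-based positions start and end, and emit
# each window as a SeqRecord keyed by its coordinates. The function name below is hypothetical.
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord

def sliding_window_kmer_records(sequence, length, start=1, end=None):
    end = end if end else len(sequence)
    kmer_dict = {}
    for pos in range(start, end - length + 2):            # 1-based, inclusive coordinates
        kmer_id = "%d_%d" % (pos, pos + length - 1)
        kmer_dict[kmer_id] = SeqRecord(Seq(str(sequence[pos - 1:pos + length - 1])),
                                       id=kmer_id, description="")
    return kmer_dict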
                                  genetic_code=args.genetic_code,
                                  analyze_only_top_strand=args.analyze_only_top_strand,
                                  minimum_protein_length=args.min_prot_len)  # closing arguments of the preceding long-ORF extraction call

if args.pfam_database:
    HMMER3.parallel_hmmscan(args.pfam_database, pep_from_longest_orfs, hmmscan_vs_pfam_output,
                            split_dir=hmmscan_splited_fasta_dir,
                            splited_domtblout_dir=splited_domtblout_dir,
                            domtblout_outfile=domtblout_outfile,
                            dont_output_alignments=True)

if args.blast_database:
    BLASTp.parallel_blastp(pep_from_longest_orfs, args.blast_database,
                           outfile=blastp_outfile,
                           evalue=0.00001,
                           output_format=6,
                           blast_options=" -max_target_seqs 1",
                           combine_output_to_single_file=True,
                           split_dir=blastp_split_dir,
                           splited_output_dir=blastp_splited_output_dir)

TransDecoder.predict_pep(args.input,
                         pfam_hits=domtblout_outfile,
                         blastp_hits=blastp_outfile,
                         minimum_orf_length_if_no_other_evidence=args.min_orf_len_if_no_other_evidence,
                         file_with_orfs_for_training=args.file_with_orfs_for_training,
                         number_of_top_orfs_for_training=args.number_of_top_orfs_for_training)
HMMER3.extract_dom_ids_hits_from_domtblout(output_domtblout,
                                           output_pfam_annotated_dom_ids)
hits_dict = HMMER3.extract_dom_names_hits_from_domtblout(output_domtblout,
                                                         output_pfam_annotated_dom_names)
supported_ids = IdSet(hits_dict.keys())
supported_ids.write(output_pfam_supported_transcripts_ids)

# Strip the ".t<N>" transcript suffix to get ids of Pfam-supported genes
remove_transcript_ids_str = r"sed -re 's/\.t[0123456789]+//' %s | sort -k 1 | uniq > %s" % (
    output_pfam_supported_transcripts_ids, output_pfam_supported_genes_ids)
os.system(remove_transcript_ids_str)

if args.swissprot_db:
    print("Annotating peptides (Swissprot database)...")
    BLASTp.threads = args.threads
    BLASTp.parallel_blastp(output_pep, args.swissprot_db,
                           evalue=0.0000001,
                           output_format=6,
                           outfile=output_swissprot_blastp_hits,
                           split_dir="splited_blastp_fasta",
                           splited_output_dir="splited_blastp_output_dir")
    hits_dict = BLASTp.extract_hits_from_tbl_output(output_swissprot_blastp_hits,
                                                    output_swissprot_blastp_hits_names)
    supported_ids = IdSet(hits_dict.keys())
    supported_ids.write(output_swissprot_supported_transcripts_ids)

    # Strip the ".t<N>" transcript suffix to get ids of Swissprot-supported genes
    remove_transcript_ids_str = r"sed -re 's/\.t[0123456789]+//' %s | sort -k 1 | uniq > %s" % (
        output_swissprot_supported_transcripts_ids, output_swissprot_supported_genes_ids)
    os.system(remove_transcript_ids_str)

    # Remove the temporary split directories created for the parallel BLASTp run
    for directory in ("splited_blastp_fasta", "splited_blastp_output_dir"):
        shutil.rmtree(directory)
if args.database_type == "nucleotide":
    MakeBLASTDb.make_nucleotide_db(args.input, args.name,
                                   mask_file if args.mask else None,
                                   output_file=args.name)
    BLASTn.parallel_blastn(args.input, args.name,
                           outfile=args.output,
                           blast_options=args.other_options,
                           split_dir="splited_fasta",
                           splited_output_dir="splited_output_dir",
                           evalue=args.evalue,
                           output_format=args.output_format,
                           threads=args.threads,
                           combine_output_to_single_file=True)
elif args.database_type == "protein":
    MakeBLASTDb.make_protein_db(args.input, args.name,
                                mask_file if args.mask else None,
                                output_file=args.name)
    BLASTp.parallel_blastp(args.input, args.name,
                           outfile=args.output,
                           blast_options=args.other_options,
                           split_dir="splited_fasta",
                           splited_output_dir="splited_output_dir",
                           evalue=args.evalue,
                           output_format=args.output_format,
                           threads=args.threads,
                           combine_output_to_single_file=True)
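# For reference, a rough sketch of the NCBI BLAST+ commands the nucleotide branch above is
# assumed to boil down to (standard BLAST+ flags; the wrappers' exact invocation, query
# splitting and masking handling may differ). Function name and paths are illustrative only.
import subprocess

def blast_self_nucleotide(input_fasta, db_name, output_tab, evalue=1e-5, threads=1):
    # Build a nucleotide database from the input FASTA ...
    subprocess.run(["makeblastdb", "-in", input_fasta, "-dbtype", "nucl", "-out", db_name],
                   check=True)
    # ... and search the same sequences against it, writing tabular (outfmt 6) hits
    subprocess.run(["blastn", "-query", input_fasta, "-db", db_name,
                    "-evalue", str(evalue), "-outfmt", "6",
                    "-num_threads", str(threads), "-out", output_tab],
                   check=True)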