def command_scan(inputfile, pwmfile, nreport=1, fpr=0.01, cutoff=None, bed=False,
                 scan_rc=True, table=False, score_table=False, moods=False,
                 pvalue=None, bgfile=None, genome=None, ncpus=None, normalize=False):
    motifs = read_motifs(pwmfile)

    fa = as_fasta(inputfile, genome)

    # initialize scanner
    s = Scanner(ncpus=ncpus)
    s.set_motifs(pwmfile)

    if genome:
        s.set_genome(genome=genome)

    if genome or bgfile:
        s.set_background(genome=genome, fname=bgfile, length=fa.median_length())

    if not score_table:
        s.set_threshold(fpr=fpr, threshold=cutoff)

    if table:
        it = scan_table(s, inputfile, fa, motifs, cutoff, bgfile, nreport,
                        scan_rc, pvalue, moods)
    elif score_table:
        it = scan_score_table(s, fa, motifs, scan_rc, normalize=normalize)
    else:
        it = scan_normal(s, inputfile, fa, motifs, cutoff, bgfile, nreport,
                         scan_rc, pvalue, moods, bed, normalize=normalize)

    for row in it:
        yield row
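# Usage sketch (assumptions: "peaks.bed" and "motifs.pfm" are placeholder files,
# "hg38" is an installed genomepy genome, and the scan_table/scan_score_table/
# scan_normal helpers are available in the same module). command_scan is a
# generator, so its rows can simply be streamed to stdout or a file.
for row in command_scan("peaks.bed", "motifs.pfm", nreport=1, fpr=0.01, bed=True, genome="hg38"):
    print(row)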
def get_PWMScore(self, fin_regions_fa):
    """Scan for motifs in every peak.

    Arguments:
        fin_regions_fa {str} -- input FASTA file

    Returns:
        str -- path to the motif score file (pfmscorefile)
    """
    pfmscorefile = NamedTemporaryFile(mode="w", dir=mytmpdir(), delete=False)
    seqs = [s.split(" ")[0] for s in as_fasta(fin_regions_fa, genome=self.genome).ids]

    s = Scanner(ncpus=self.ncore)
    s.set_motifs(self.pfmfile)
    s.set_threshold(threshold=0.0)
    s.set_genome(self.genome)

    with open(self.pfmfile) as f:
        motifs = read_motifs(f)

    chunksize = 10000  # scan 10k peaks at a time
    with tqdm(total=len(seqs)) as pbar:
        for chunk in range(0, len(seqs), chunksize):
            chunk_seqs = seqs[chunk : chunk + chunksize]
            # print(chunk, "-", chunk + chunksize, "enhancers")
            pfm_score = []
            it = s.best_score(chunk_seqs, zscore=True, gc=True)
            # GC-normalization is used for the motif scan because many sequences
            # are GC-enriched. The GimmeMotifs develop branch already includes a
            # GC-normalization option.
            for seq, scores in zip(chunk_seqs, it):
                for motif, score in zip(motifs, scores):
                    pfm_score.append([motif.id, seq, score])
                pbar.update(1)

            pfm_score = pd.DataFrame(pfm_score, columns=["motif", "enhancer", "zscore"])
            pfm_score = pfm_score.set_index("motif")

            # Rank and min-max normalization, as used when the model was built.
            pfm_score["zscoreRank"] = minmax_scale(rankdata(pfm_score["zscore"]))

            cols = ["enhancer", "zscore", "zscoreRank"]
            write_header = False
            if chunk == 0:
                write_header = True
            pfm_score[cols].to_csv(pfmscorefile, sep="\t", header=write_header)

    return pfmscorefile.name
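# Usage sketch (assumptions: `binding` is an ANANSE-style object whose class
# defines get_PWMScore and provides `genome`, `ncore` and `pfmfile` attributes;
# "enhancers.fa" is a placeholder FASTA file).
# scorefile = binding.get_PWMScore("enhancers.fa")
# scores = pd.read_table(scorefile, index_col=0)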
def scan_to_table(input_table, genome, data_dir, scoring, pwmfile=None, ncpus=None):
    config = MotifConfig()

    if pwmfile is None:
        pwmfile = config.get_default_params().get("motif_db", None)
        if pwmfile is not None:
            pwmfile = os.path.join(config.get_motif_dir(), pwmfile)

    if pwmfile is None:
        raise ValueError("no pwmfile given and no default database specified")

    logger.info("reading table")
    if input_table.endswith("feather"):
        df = pd.read_feather(input_table)
        idx = df.iloc[:, 0].values
    else:
        df = pd.read_table(input_table, index_col=0, comment="#")
        idx = df.index

    regions = list(idx)

    s = Scanner(ncpus=ncpus)
    s.set_motifs(pwmfile)
    s.set_genome(genome)

    nregions = len(regions)

    scores = []
    if scoring == "count":
        logger.info("setting threshold")
        s.set_threshold(fpr=FPR, genome=genome)
        logger.info("creating count table")
        for row in s.count(regions):
            scores.append(row)
        logger.info("done")
    else:
        s.set_threshold(threshold=0.0)
        logger.info("creating score table")
        for row in s.best_score(regions):
            scores.append(row)
        logger.info("done")

    motif_names = [m.id for m in read_motifs(open(pwmfile))]
    logger.info("creating dataframe")
    return pd.DataFrame(scores, index=idx, columns=motif_names)
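# Usage sketch (assumptions: "regions.txt" is a placeholder table with regions
# as its index, "hg38" is an installed genome and "data/" a placeholder data
# directory; "count" builds a motif count table thresholded at the module-level FPR).
counts = scan_to_table("regions.txt", "hg38", "data/", "count", ncpus=4)
print(counts.head())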
def get_motif_scores(fa, motifs):
    s = Scanner()
    s.set_motifs(motifs)
    s.set_threshold(threshold=0.0)

    seqs = Fasta(fa.seqfn)
    for i, result in enumerate(s.scan(seqs, nreport=1)):
        intron_id = seqs.ids[i]
        for m, matches in enumerate(result):
            motif = motifs[m]
            for score, pos, strand in matches:
                if score < 0:
                    score_rescaled = rescale(
                        score, orig_range=[motif.min_score, 0], new_range=[0, 50]
                    )
                else:
                    score_rescaled = rescale(
                        score, orig_range=[0, motif.max_score], new_range=[50, 100]
                    )
                yield (intron_id, motif.id, score_rescaled)
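# Usage sketch (assumptions: the `fa` argument only needs a `seqfn` attribute
# pointing to a FASTA file, e.g. the output of pybedtools' sequence extraction;
# "introns.fa" and "motifs.pfm" are placeholder files).
from types import SimpleNamespace

motifs = read_motifs("motifs.pfm")
for intron_id, motif_id, score in get_motif_scores(SimpleNamespace(seqfn="introns.fa"), motifs):
    print(intron_id, motif_id, score)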
def threshold(args):
    """Calculate motif score threshold for a given FPR."""
    if args.fpr < 0 or args.fpr > 1:
        print("Please specify a FPR between 0 and 1")
        sys.exit(1)

    motifs = read_motifs(args.pwmfile)

    s = Scanner()
    s.set_motifs(args.pwmfile)
    s.set_threshold(args.fpr, filename=args.inputfile)

    print("Motif\tScore\tCutoff")
    for motif in motifs:
        min_score = motif.pwm_min_score()
        max_score = motif.pwm_max_score()
        opt_score = s.threshold[motif.id]
        if opt_score is None:
            opt_score = motif.pwm_max_score()
        threshold = (opt_score - min_score) / (max_score - min_score)
        print("{0}\t{1}\t{2}".format(motif.id, opt_score, threshold))
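# Usage sketch (assumptions: "motifs.pfm" is a placeholder motif file and
# "background.fa" a placeholder FASTA file with background sequences; the
# function expects an argparse-style namespace).
from argparse import Namespace

threshold(Namespace(fpr=0.01, pwmfile="motifs.pfm", inputfile="background.fa"))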
def scan(self, background_length=200, fpr=0.02, n_cpus=-1, verbose=True):
    """
    Scan DNA sequences searching for TF binding motifs.

    Args:
        background_length (int): background length. This is used for the
            calculation of the binding score.
        fpr (float): False positive rate for motif identification.
        n_cpus (int): number of CPUs for parallel calculation.
        verbose (bool): Whether to show a progress bar.
    """
    self.fpr = fpr
    self.background_length = background_length

    print("initiating scanner ...")

    ## 1. initialize scanner ##
    # load motifs
    motifs = default_motifs()

    # initialize scanner
    s = Scanner(ncpus=n_cpus)

    # set parameters
    s.set_motifs(motifs)
    s.set_background(genome=self.ref_genome, length=background_length)
    # s.set_background(genome="mm9", length=400)
    s.set_threshold(fpr=fpr)

    ## 2. motif scan ##
    print("getting DNA sequences ...")
    target_sequences = peak2fasta(self.all_peaks, self.ref_genome)

    print("scanning motifs ...")
    self.scanned_df = scan_dna_for_motifs(s, motifs, target_sequences, verbose)

    self.__addLog("scanMotifs")
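# Usage sketch (assumptions: `tfi` is a celloracle TFinfo-like object on which
# this scan method is defined, with `ref_genome` and `all_peaks` already set).
# tfi.scan(background_length=200, fpr=0.02, n_cpus=4)
# tfi.scanned_df.head()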
def scan_to_table(
    input_table, genome, scoring, pfmfile=None, ncpus=None, zscore=True, gc=True
):
    """Scan regions in input table with motifs.

    Parameters
    ----------
    input_table : str
        Filename of input table. Can be either a tab-separated text file or a
        feather file.
    genome : str
        Genome name. Can be either the name of a FASTA-formatted file or a
        genomepy genome name.
    scoring : str
        "count" or "score"
    pfmfile : str, optional
        Specify a PFM file for scanning.
    ncpus : int, optional
        If defined this specifies the number of cores to use.

    Returns
    -------
    table : pandas.DataFrame
        DataFrame with motif ids as column names and regions as index. Values
        are either counts or scores depending on the 'scoring' parameter.
    """
    config = MotifConfig()

    if pfmfile is None:
        pfmfile = config.get_default_params().get("motif_db", None)
        if pfmfile is not None:
            pfmfile = os.path.join(config.get_motif_dir(), pfmfile)

    if pfmfile is None:
        raise ValueError("no pfmfile given and no default database specified")

    logger.info("reading table")
    if input_table.endswith("feather"):
        df = pd.read_feather(input_table)
        idx = df.iloc[:, 0].values
    else:
        df = pd.read_table(input_table, index_col=0, comment="#")
        idx = df.index

    regions = list(idx)
    if len(regions) >= 1000:
        check_regions = np.random.choice(regions, size=1000, replace=False)
    else:
        check_regions = regions
    size = int(
        np.median([len(seq) for seq in as_fasta(check_regions, genome=genome).seqs])
    )

    s = Scanner(ncpus=ncpus)
    s.set_motifs(pfmfile)
    s.set_genome(genome)
    s.set_background(genome=genome, gc=gc, size=size)

    scores = []
    if scoring == "count":
        logger.info("setting threshold")
        s.set_threshold(fpr=FPR)
        logger.info("creating count table")
        for row in s.count(regions):
            scores.append(row)
        logger.info("done")
    else:
        s.set_threshold(threshold=0.0)
        msg = "creating score table"
        if zscore:
            msg += " (z-score"
            if gc:
                msg += ", GC%"
            msg += ")"
        else:
            msg += " (logodds)"
        logger.info(msg)
        for row in s.best_score(regions, zscore=zscore, gc=gc):
            scores.append(row)
        logger.info("done")

    motif_names = [m.id for m in read_motifs(pfmfile)]
    logger.info("creating dataframe")
    return pd.DataFrame(scores, index=idx, columns=motif_names)
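# Usage sketch (assumptions: "regions.txt" is a placeholder region table and
# "hg38" an installed genome; "score" with zscore/gc enabled returns
# GC-normalized z-scores per motif and region).
scores = scan_to_table("regions.txt", "hg38", "score", zscore=True, gc=True)
print(scores.head())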
def diff(args):
    infiles = args.inputfiles.split(",")
    bgfile = args.bgfile
    outfile = args.outputfile
    pwmfile = args.pwmfile
    cutoff = args.cutoff
    genome = args.genome
    minenr = float(args.minenr)
    minfreq = float(args.minfreq)

    tmpdir = mkdtemp()

    # Retrieve FASTA clusters from BED file
    if len(infiles) == 1 and infiles[0].endswith("bed"):
        if not args.genome:
            sys.stderr.write("Can't convert BED file without genome!\n")
            sys.exit(1)

        clusters = {}
        for line in open(infiles[0]):
            vals = line.strip().split("\t")
            clusters.setdefault(vals[4], []).append(vals[:3])

        infiles = []
        for cluster, regions in clusters.items():
            sys.stderr.write("Creating FASTA file for {0}\n".format(cluster))
            inbed = os.path.join(tmpdir, "{0}.bed".format(cluster))
            outfa = os.path.join(tmpdir, "{0}.fa".format(cluster))
            with open(inbed, "w") as f:
                for vals in regions:
                    f.write("{0}\t{1}\t{2}\n".format(*vals))
            Genome(genome).track2fasta(inbed, outfa)
            infiles.append(outfa)

    pwms = dict([(m.id, m) for m in pwmfile_to_motifs(pwmfile)])
    motifs = [m for m in pwms.keys()]
    names = [os.path.basename(os.path.splitext(fname)[0]) for fname in infiles]

    s = Scanner()
    s.set_motifs(pwmfile)
    s.set_threshold(threshold=cutoff)

    # Get background frequencies
    nbg = float(len(Fasta(bgfile).seqs))
    bgcounts = s.total_count(bgfile, nreport=1)
    bgfreq = [(c + 0.01) / nbg for c in bgcounts]

    # Get frequencies in input files
    freq = {}
    counts = {}
    for fname in infiles:
        mcounts = s.total_count(fname, nreport=1)
        n = float(len(Fasta(fname).seqs))
        counts[fname] = mcounts
        freq[fname] = [(c + 0.01) / n for c in mcounts]

    freq = np.array([freq[fname] for fname in infiles]).transpose()
    counts = np.array([counts[fname] for fname in infiles]).transpose()

    diff_plot(motifs, pwms, names, freq, counts, bgfreq, bgcounts, outfile,
              minenr=minenr, minfreq=minfreq)

    shutil.rmtree(tmpdir)
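# Usage sketch (assumptions: all file names are placeholders; "clusters.bed"
# carries the cluster name in its fifth column, so it is split into one FASTA
# file per cluster before scanning against the background).
from argparse import Namespace

diff(Namespace(
    inputfiles="clusters.bed",
    bgfile="background.fa",
    outputfile="diff_motifs.png",
    pwmfile="motifs.pfm",
    cutoff=0.9,
    genome="hg38",
    minenr=3.0,
    minfreq=0.01,
))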
def moap(inputfile, method="hypergeom", scoring=None, outfile=None, motiffile=None,
         pwmfile=None, genome=None, fpr=0.01, ncpus=None, subsample=None):
    """Run a single motif activity prediction algorithm.

    Parameters
    ----------
    inputfile : str
        File with regions (chr:start-end) in first column and either cluster
        name in second column or a table with values.
    method : str, optional
        Motif activity method to use. Any of 'hypergeom', 'lasso',
        'lightningclassification', 'lightningregressor', 'bayesianridge',
        'rf', 'xgboost'. Default is 'hypergeom'.
    scoring : str, optional
        Either 'score' or 'count'.
    outfile : str, optional
        Name of outputfile to save the fitted activity values.
    motiffile : str, optional
        Table with motif scan results. First column should be exactly the same
        regions as in the inputfile.
    pwmfile : str, optional
        File with motifs in pwm format. Required when motiffile is not supplied.
    genome : str, optional
        Genome name, as indexed by gimme. Required when motiffile is not supplied.
    fpr : float, optional
        FPR for motif scanning.
    ncpus : int, optional
        Number of threads to use. Default is the number specified in the config.

    Returns
    -------
    pandas DataFrame with motif activity
    """
    if scoring and scoring not in ['score', 'count']:
        raise ValueError("valid values are 'score' and 'count'")

    config = MotifConfig()

    if inputfile.endswith("feather"):
        df = pd.read_feather(inputfile)
        df = df.set_index(df.columns[0])
    else:
        # read data
        df = pd.read_table(inputfile, index_col=0, comment="#")

    clf = Moap.create(method, ncpus=ncpus)

    if clf.ptype == "classification":
        if df.shape[1] != 1:
            raise ValueError("1 column expected for {}".format(method))
    else:
        if np.dtype('object') in set(df.dtypes):
            raise ValueError(
                "columns should all be numeric for {}".format(method))

    if motiffile is None:
        if genome is None:
            raise ValueError("need a genome")

        pwmfile = pwmfile_location(pwmfile)
        try:
            motifs = read_motifs(pwmfile)
        except:
            sys.stderr.write("can't read motifs from {}".format(pwmfile))
            raise

        # initialize scanner
        s = Scanner(ncpus=ncpus)
        sys.stderr.write(pwmfile + "\n")
        s.set_motifs(pwmfile)
        s.set_genome(genome)
        s.set_background(genome=genome)

        # scan for motifs
        sys.stderr.write("scanning for motifs\n")
        motif_names = [m.id for m in read_motifs(pwmfile)]
        scores = []
        if method == 'classic' or scoring == "count":
            s.set_threshold(fpr=fpr)
            for row in s.count(list(df.index)):
                scores.append(row)
        else:
            for row in s.best_score(list(df.index), normalize=True):
                scores.append(row)
        motifs = pd.DataFrame(scores, index=df.index, columns=motif_names)
    else:
        motifs = pd.read_table(motiffile, index_col=0, comment="#")

    if outfile and os.path.exists(outfile):
        out = pd.read_table(outfile, index_col=0, comment="#")
        ncols = df.shape[1]
        if ncols == 1:
            ncols = len(df.iloc[:, 0].unique())

        if out.shape[0] == motifs.shape[1] and out.shape[1] == ncols:
            logger.warn("%s output already exists... skipping", method)
            return out

    if subsample is not None:
        n = int(subsample * df.shape[0])
        logger.debug("Subsampling %d regions", n)
        df = df.sample(n)

    motifs = motifs.loc[df.index]

    if method == "lightningregressor":
        outdir = os.path.dirname(outfile)
        tmpname = os.path.join(outdir, ".lightning.tmp")
        clf.fit(motifs, df, tmpdir=tmpname)
        shutil.rmtree(tmpname)
    else:
        clf.fit(motifs, df)

    if outfile:
        with open(outfile, "w") as f:
            f.write("# maelstrom - GimmeMotifs version {}\n".format(__version__))
            f.write("# method: {} with motif {}\n".format(method, scoring))
            if genome:
                f.write("# genome: {}\n".format(genome))
            if motiffile:
                f.write("# motif table: {}\n".format(motiffile))
            f.write("# {}\n".format(clf.act_description))

        with open(outfile, "a") as f:
            clf.act_.to_csv(f, sep="\t")

    return clf.act_
def scan_to_table(input_table, genome, scoring, pwmfile=None, ncpus=None):
    """Scan regions in input table with motifs.

    Parameters
    ----------
    input_table : str
        Filename of input table. Can be either a tab-separated text file or a
        feather file.
    genome : str
        Genome name. Can be either the name of a FASTA-formatted file or a
        genomepy genome name.
    scoring : str
        "count" or "score"
    pwmfile : str, optional
        Specify a PFM file for scanning.
    ncpus : int, optional
        If defined this specifies the number of cores to use.

    Returns
    -------
    table : pandas.DataFrame
        DataFrame with motif ids as column names and regions as index. Values
        are either counts or scores depending on the 'scoring' parameter.
    """
    config = MotifConfig()

    if pwmfile is None:
        pwmfile = config.get_default_params().get("motif_db", None)
        if pwmfile is not None:
            pwmfile = os.path.join(config.get_motif_dir(), pwmfile)

    if pwmfile is None:
        raise ValueError("no pwmfile given and no default database specified")

    logger.info("reading table")
    if input_table.endswith("feather"):
        df = pd.read_feather(input_table)
        idx = df.iloc[:, 0].values
    else:
        df = pd.read_table(input_table, index_col=0, comment="#")
        idx = df.index

    regions = list(idx)

    s = Scanner(ncpus=ncpus)
    s.set_motifs(pwmfile)
    s.set_genome(genome)
    s.set_background(genome=genome)

    nregions = len(regions)

    scores = []
    if scoring == "count":
        logger.info("setting threshold")
        s.set_threshold(fpr=FPR)
        logger.info("creating count table")
        for row in s.count(regions):
            scores.append(row)
        logger.info("done")
    else:
        s.set_threshold(threshold=0.0)
        logger.info("creating score table")
        for row in s.best_score(regions, normalize=True):
            scores.append(row)
        logger.info("done")

    motif_names = [m.id for m in read_motifs(pwmfile)]
    logger.info("creating dataframe")
    return pd.DataFrame(scores, index=idx, columns=motif_names)
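# Usage sketch (assumptions: "regions.txt" is a placeholder region table and
# "hg38" an installed genome; "score" here returns normalized best scores per
# motif, whereas the variant above exposes explicit z-score/GC options).
scores = scan_to_table("regions.txt", "hg38", "score", ncpus=4)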
def scan(self, background_length=200, fpr=0.02, n_cpus=-1, verbose=True,
         motifs=None, TF_evidence_level="direct_and_indirect", TF_formatting="auto"):
    """
    Scan DNA sequences searching for TF binding motifs.

    Args:
        background_length (int): background length. This is used for the
            calculation of the binding score.
        fpr (float): False positive rate for motif identification.
        n_cpus (int): number of CPUs for parallel calculation.
        verbose (bool): Whether to show a progress bar.
        motifs (list): a list of gimmemotifs motifs; reverts to
            default_motifs() if None.
        TF_evidence_level (str): Please select one from ["direct",
            "direct_and_indirect"]. If "direct" is selected, only TFs with
            direct binding evidence are used. If "direct_and_indirect" is
            selected, TFs with binding evidence and inferred TFs are used.
            For more information, please read the explanation of the Motif
            class in the gimmemotifs documentation
            (https://gimmemotifs.readthedocs.io/en/master/index.html).
    """
    self.fpr = fpr
    self.background_length = background_length

    ## 1. initialize scanner ##
    # load motifs
    if motifs is None:
        if verbose:
            print("No motif data entered. Loading default motifs for your species ...")

        if self.species in ["Mouse", "Human", "Rat"]:
            # For vertebrates, the gimmemotifs default motifs are used.
            motifs = default_motifs()
            self.motif_db_name = "gimme.vertebrate.v5.0"
            self.TF_formatting = True
            if verbose:
                print(" Default motif for vertebrate: gimme.vertebrate.v5.0. \n For more information, please go to https://gimmemotifs.readthedocs.io/en/master/overview.html \n")
        elif self.species in ["Zebrafish"]:
            # For Zebrafish, the CisBP database is used.
            self.motif_db_name = 'CisBP_ver2_Danio_rerio.pfm'
            motifs = load_motifs(self.motif_db_name)
            self.TF_formatting = False
            if verbose:
                print(f" Default motif for {self.species}: {self.motif_db_name}. \n For more information, please see the celloracle documentation. \n")
        elif self.species in ["S.cerevisiae"]:
            # For S.cerevisiae, the CisBP database is used.
            self.motif_db_name = 'CisBP_ver2_Saccharomyces_cerevisiae.pfm'
            motifs = load_motifs(self.motif_db_name)
            self.TF_formatting = False
            if verbose:
                print(f" Default motif for {self.species}: {self.motif_db_name}. \n For more information, please see the celloracle documentation. \n")
        elif self.species in ["Xenopus"]:
            # For Xenopus, the CisBP database is used.
            self.motif_db_name = 'CisBP_ver2_Xenopus_tropicalis_and_Xenopus_laevis.pfm'
            motifs = load_motifs(self.motif_db_name)
            self.TF_formatting = False
            if verbose:
                print(f" Default motif for {self.species}: {self.motif_db_name}. \n For more information, please see the celloracle documentation. \n")
        elif self.species in ["Drosophila"]:
            # For Drosophila, the CisBP database is used.
            self.motif_db_name = 'CisBP_ver2_Drosophila_mix.pfm'
            motifs = load_motifs(self.motif_db_name)
            self.TF_formatting = False
            if verbose:
                print(f" Default motif for {self.species}: {self.motif_db_name}. \n For more information, please see the celloracle documentation. \n")
        elif self.species in ["C.elegans"]:
            # For C.elegans, the CisBP database is used.
            self.motif_db_name = 'CisBP_ver2_Caenorhabditis_elegans.pfm'
            motifs = load_motifs(self.motif_db_name)
            self.TF_formatting = False
            if verbose:
                print(f" Default motif for {self.species}: {self.motif_db_name}. \n For more information, please see the celloracle documentation. \n")
        elif self.species in ["Arabidopsis"]:
            # For Arabidopsis, the CisBP database is used.
            self.motif_db_name = 'CisBP_ver2_Arabidopsis_thaliana.pfm'
            motifs = load_motifs(self.motif_db_name)
            self.TF_formatting = False
            if verbose:
                print(f" Default motif for {self.species}: {self.motif_db_name}. \n For more information, please see the celloracle documentation. \n")
        else:
            raise ValueError(
                f"We have no default motifs for your species, {self.species}. Please set motifs.")
    else:
        # Check format
        if isinstance(motifs, list):
            if isinstance(motifs[0], Motif):
                if verbose:
                    print("Checking your motifs... Motif format looks good. \n")
            else:
                raise ValueError("Motif data type is invalid.")
        else:
            raise ValueError("motifs should be a list of gimmemotifs Motif objects.")

        self.motif_db_name = "custom_motifs"
        if TF_formatting == "auto":
            self.TF_formatting = False
        else:
            self.TF_formatting = TF_formatting

    self.motifs = motifs
    self.dic_motif2TFs = _get_dic_motif2TFs(
        species=self.species,
        motifs=motifs,
        TF_evidence_level=TF_evidence_level,
        formatting=self.TF_formatting)
    self.TF_evidence_level = TF_evidence_level

    # initialize scanner
    if verbose:
        print("Initiating scanner... \n")
    s = Scanner(ncpus=n_cpus)

    # set parameters
    s.set_motifs(motifs)
    try:
        s.set_background(genome=self.ref_genome, size=background_length)  # for gimmemotifs 0.14.4
    except:
        s.set_background(genome=self.ref_genome, length=background_length)  # for old gimmemotifs 0.13
    # s.set_background(genome="mm9", length=400)

    if verbose:
        print("Calculating FPR-based threshold. This step may take substantial time when you load a new reference genome. It will be quicker the second time. \n")
    s.set_threshold(fpr=fpr)

    ## 2. motif scan ##
    print("Convert peak info into DNA sequences ... \n")
    # Get DNA sequences
    target_sequences = peak2fasta(self.all_peaks, self.ref_genome)
    # Remove DNA sequences with zero length
    target_sequences = remove_zero_seq(fasta_object=target_sequences)

    print("Scanning motifs ... It may take several hours if you process many peaks. \n")
    self.scanned_df = scan_dna_for_motifs(s, motifs, target_sequences, verbose)

    self.__addLog("scanMotifs")
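# Usage sketch (assumptions: `tfi` is the celloracle TFinfo-like object carrying
# this scan method; passing motifs=None picks the species default database,
# while a custom list of gimmemotifs Motif objects can be supplied instead).
# custom_motifs = read_motifs("my_motifs.pfm")  # hypothetical PFM file
# tfi.scan(fpr=0.02, motifs=custom_motifs, TF_evidence_level="direct")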