def cluster(self, threshold=0.5, metric="pcc", clust_method="average"):
    """
    Cluster the motifs in this list by pairwise similarity.

    Parameters
    ----------
    threshold : float
        Distance cutoff passed to scipy's fcluster (criterion="distance").
    metric : str
        Similarity metric understood by gimmemotifs' MotifComparer; one of:
        seqcor, pcc, ed, distance, wic, chisq, akl or ssd.
    clust_method : str
        Linkage method for the hierarchical clustering.

    Returns
    -------
    dict
        A dictionary with keys=cluster names ("Cluster_<n>") and
        values=MotifList objects.
    """
    # gimmemotifs is imported lazily here (heavy dependency)
    from gimmemotifs.motif import Motif
    from gimmemotifs.comparison import MotifComparer

    # Importing gimmemotifs changes the seaborn style globally; restore it
    sns.set_style("ticks")

    # Collect the underlying gimmemotifs objects
    # (get_gimmemotif() fills in the gimme_obj attribute as a side effect)
    gimme_motifs = [one_motif.get_gimmemotif().gimme_obj for one_motif in self]

    # All-vs-all similarity scores between motifs
    comparer = MotifComparer()
    score_dict = comparer.get_all_scores(gimme_motifs, gimme_motifs,
                                         match="total", metric=metric, combine="mean")
    self.similarity_matrix = generate_similarity_matrix(score_dict)

    # Hierarchical clustering on the condensed distance vector
    condensed = ssd.squareform(self.similarity_matrix.to_numpy())
    self.linkage_mat = linkage(condensed, method=clust_method)

    # Flatten the tree into per-motif cluster labels
    membership = fcluster(self.linkage_mat, threshold, criterion="distance")
    labels = ["Cluster_{0}".format(lab) for lab in membership]

    # Group the original motifs by their cluster label
    clusters = {lab: MotifList() for lab in labels}
    for idx, lab in enumerate(labels):
        clusters[lab].append(self[idx])

    return clusters
def create_consensus(self):
    """
    Create a consensus motif from this MotifList.

    Iteratively merges the two most similar motifs (by PCC) until a single
    motif remains, then converts it back to a OneMotif object. The consensus
    is named after up to the first three source motifs.

    Returns
    -------
    OneMotif
        The consensus motif (equals the single motif if the list has one entry).
    """
    gimme_motifs = [one_motif.gimme_obj for one_motif in self]  # gimmemotif objects

    if len(gimme_motifs) > 1:
        comparer = MotifComparer()

        # Initial all-vs-all similarity scores
        score_dict = comparer.get_all_scores(gimme_motifs, gimme_motifs,
                                             match="total", metric="pcc", combine="mean")

        # Merge the most similar pair until only one motif is left
        while True:
            # Indices (sorted ascending) of the most similar pair
            idx_a, idx_b = sorted(find_best_pair(gimme_motifs, score_dict))

            merged = merge_motifs(gimme_motifs[idx_a], gimme_motifs[idx_b])
            del gimme_motifs[idx_b]
            gimme_motifs[idx_a] = merged

            if len(gimme_motifs) == 1:
                break  # done merging

            # Extend score_dict with comparisons involving the merged motif
            score_dict.setdefault(merged.id, {})
            for other in gimme_motifs:
                score_dict[merged.id][other.id] = comparer.compare_motifs(
                    merged, other, metric="pcc")
                score_dict[other.id][merged.id] = comparer.compare_motifs(
                    other, merged, metric="pcc")

    # Round pwm values
    consensus_gimme = gimme_motifs[0]
    consensus_gimme.pwm = [[round(value, 5) for value in row]
                           for row in consensus_gimme.pwm]

    # Convert back to a OneMotif object
    consensus = gimmemotif_to_onemotif(consensus_gimme)
    consensus.gimme_obj = consensus_gimme

    # Name the consensus after (up to) the first three source motifs
    source_names = [one_motif.name for one_motif in self]
    consensus.name = ",".join(source_names[:3])
    if len(source_names) > 3:
        consensus.name += "(...)"

    return consensus
def cluster_motifs(motifs, match="total", metric="wic", combine="mean", pval=True, threshold=0.95, trim_edges=False, edge_ic_cutoff=0.2, include_bg=True, progress=True):
    """
    Clusters a set of sequence motifs.

    Required arg 'motifs' is a file containing positional frequency matrices
    or an array with motifs.

    Optional args:

    'match', 'metric' and 'combine' specify the method used to compare and
    score the motifs. By default the WIC score is used (metric='wic'), using
    the score over the whole alignment (match='total'), with the total motif
    score calculated as the mean score of all positions (combine='mean').
    'match' can be either 'total' for the total alignment or 'subtotal' for
    the maximum scoring subsequence of the alignment.
    'metric' can be any metric defined in MotifComparer, currently: 'pcc',
    'ed', 'distance', 'wic' or 'chisq'.
    'combine' determines how the total score is calculated from the score of
    individual positions and can be either 'sum' or 'mean'.

    'pval' can be True or False and determines if the score should be
    converted to an empirical p-value.

    'threshold' determines the score (or p-value) cutoff.

    If 'trim_edges' is set to True, all motif edges with an IC below
    'edge_ic_cutoff' will be removed before clustering.

    When computing the average of two motifs 'include_bg' determines if, at a
    position only present in one motif, the information in that motif should
    be kept, or if it should be averaged with background frequencies. Should
    probably be left set to True.

    'progress' controls whether a progress bar is written to stderr.

    Returns the root MotifTree node of the resulting cluster tree.
    """
    # First read pfm or pfm formatted motiffile
    if type([]) != type(motifs):
        # BUGFIX: close the file handle after parsing instead of leaking it
        with open(motifs) as pfm_file:
            motifs = read_motifs(pfm_file, fmt="pwm")

    mc = MotifComparer()

    # Trim edges with low information content
    if trim_edges:
        for motif in motifs:
            motif.trim(edge_ic_cutoff)

    # Make a MotifTree node for every motif
    nodes = [MotifTree(m) for m in motifs]

    # Determine all pairwise scores and maxscore per motif
    scores = {}
    motif_nodes = dict([(n.motif.id, n) for n in nodes])
    motifs = [n.motif for n in nodes]

    if progress:
        sys.stderr.write("Calculating initial scores\n")
    result = mc.get_all_scores(motifs, motifs, match, metric, combine, pval, parallel=True)

    for m1, other_motifs in result.items():
        for m2, score in other_motifs.items():
            if m1 == m2:
                # Self-comparison gives the maximum attainable score for a motif
                if pval:
                    motif_nodes[m1].maxscore = 1 - score[0]
                else:
                    motif_nodes[m1].maxscore = score[0]
            else:
                if pval:
                    # Convert score to 1 - p so "bigger is better" holds either way
                    score = [1 - score[0]] + score[1:]
                scores[(motif_nodes[m1], motif_nodes[m2])] = score

    cluster_nodes = [node for node in nodes]
    ave_count = 1
    total = len(cluster_nodes)

    while len(cluster_nodes) > 1:
        # Find the best-scoring pair whose members are both still unmerged
        ranked_pairs = sorted(scores.keys(), key=lambda x: scores[x][0])
        i = -1
        (n1, n2) = ranked_pairs[i]
        while n1 not in cluster_nodes or n2 not in cluster_nodes:
            i -= 1
            (n1, n2) = ranked_pairs[i]

        # Merge the pair into an averaged motif
        (score, pos, orientation) = scores[(n1, n2)]
        ave_motif = n1.motif.average_motifs(n2.motif, pos, orientation, include_bg=include_bg)
        ave_motif.trim(edge_ic_cutoff)
        ave_motif.id = "Average_%s" % ave_count
        ave_count += 1

        new_node = MotifTree(ave_motif)
        if pval:
            new_node.maxscore = 1 - mc.compare_motifs(new_node.motif, new_node.motif, match, metric, combine, pval)[0]
        else:
            new_node.maxscore = mc.compare_motifs(new_node.motif, new_node.motif, match, metric, combine, pval)[0]

        new_node.mergescore = score
        n1.parent = new_node
        n2.parent = new_node
        new_node.left = n1
        new_node.right = n2

        cmp_nodes = dict([(node.motif, node) for node in nodes if not node.parent])

        if progress:
            # BUGFIX: use a separate variable for the percentage instead of
            # clobbering the 'progress' flag, and integer-divide ('//') for the
            # bar width -- 'str * float' raises TypeError on Python 3.
            pct = (1 - len(cmp_nodes) / float(total)) * 100
            sys.stderr.write('\rClustering [{0}{1}] {2}%'.format(
                '#' * (int(pct) // 10),
                " " * (10 - int(pct) // 10),
                int(pct)))

        # Score the new averaged motif against all remaining unmerged motifs
        result = mc.get_all_scores(
            [new_node.motif], cmp_nodes.keys(), match, metric, combine, pval, parallel=True)

        for motif, n in cmp_nodes.items():
            x = result[new_node.motif.id][motif.id]
            if pval:
                x = [1 - x[0]] + x[1:]
            scores[(new_node, n)] = x

        nodes.append(new_node)
        cluster_nodes = [node for node in nodes if not node.parent]

    if progress:
        sys.stderr.write("\n")

    # The last node appended is the root of the full tree; leaf nodes then
    # decide (via checkMerge) which subtrees survive the threshold
    root = nodes[-1]
    for node in [node for node in nodes if not node.left]:
        node.parent.checkMerge(root, threshold)

    return root
def cluster_motifs(
    motifs,
    match="total",
    metric="wic",
    combine="mean",
    pval=True,
    threshold=0.95,
    trim_edges=False,
    edge_ic_cutoff=0.2,
    include_bg=True,
    progress=True,
):
    """
    Clusters a set of sequence motifs.

    Required arg 'motifs' is a file containing positional frequency matrices
    or an array with motifs.

    Optional args:

    'match', 'metric' and 'combine' specify the method used to compare and
    score the motifs. By default the WIC score is used (metric='wic'), using
    the score over the whole alignment (match='total'), with the total motif
    score calculated as the mean score of all positions (combine='mean').
    'match' can be either 'total' for the total alignment or 'subtotal' for
    the maximum scoring subsequence of the alignment.
    'metric' can be any metric defined in MotifComparer, currently: 'pcc',
    'ed', 'distance', 'wic' or 'chisq'.
    'combine' determines how the total score is calculated from the score of
    individual positions and can be either 'sum' or 'mean'.

    'pval' can be True or False and determines if the score should be
    converted to an empirical p-value.

    'threshold' determines the score (or p-value) cutoff.

    If 'trim_edges' is set to True, all motif edges with an IC below
    'edge_ic_cutoff' will be removed before clustering.

    When computing the average of two motifs 'include_bg' determines if, at a
    position only present in one motif, the information in that motif should
    be kept, or if it should be averaged with background frequencies. Should
    probably be left set to True.

    'progress' controls whether a progress bar is written to stderr.

    Returns the root MotifTree node of the resulting cluster tree.
    """
    # First read pfm or pfm formatted motiffile
    if type([]) != type(motifs):
        # BUGFIX: close the file handle after parsing instead of leaking it
        with open(motifs) as pfm_file:
            motifs = read_motifs(pfm_file, fmt="pwm")

    mc = MotifComparer()

    # Trim edges with low information content
    if trim_edges:
        for motif in motifs:
            motif.trim(edge_ic_cutoff)

    # Make a MotifTree node for every motif
    nodes = [MotifTree(m) for m in motifs]

    # Determine all pairwise scores and maxscore per motif
    scores = {}
    motif_nodes = dict([(n.motif.id, n) for n in nodes])
    motifs = [n.motif for n in nodes]

    if progress:
        sys.stderr.write("Calculating initial scores\n")
    result = mc.get_all_scores(motifs, motifs, match, metric, combine, pval, parallel=True)

    for m1, other_motifs in result.items():
        for m2, score in other_motifs.items():
            if m1 == m2:
                # Self-comparison gives the maximum attainable score for a motif
                if pval:
                    motif_nodes[m1].maxscore = 1 - score[0]
                else:
                    motif_nodes[m1].maxscore = score[0]
            else:
                if pval:
                    # Convert score to 1 - p so "bigger is better" holds either way
                    score = [1 - score[0]] + score[1:]
                scores[(motif_nodes[m1], motif_nodes[m2])] = score

    cluster_nodes = [node for node in nodes]
    ave_count = 1
    total = len(cluster_nodes)

    while len(cluster_nodes) > 1:
        # Find the best-scoring pair whose members are both still unmerged
        ranked_pairs = sorted(scores.keys(), key=lambda x: scores[x][0])
        i = -1
        (n1, n2) = ranked_pairs[i]
        while n1 not in cluster_nodes or n2 not in cluster_nodes:
            i -= 1
            (n1, n2) = ranked_pairs[i]

        # Merge the pair into an averaged motif
        (score, pos, orientation) = scores[(n1, n2)]
        ave_motif = n1.motif.average_motifs(n2.motif, pos, orientation, include_bg=include_bg)
        ave_motif.trim(edge_ic_cutoff)
        ave_motif.id = "Average_%s" % ave_count
        ave_count += 1

        new_node = MotifTree(ave_motif)
        if pval:
            new_node.maxscore = 1 - mc.compare_motifs(new_node.motif, new_node.motif, match, metric, combine, pval)[0]
        else:
            new_node.maxscore = mc.compare_motifs(new_node.motif, new_node.motif, match, metric, combine, pval)[0]

        new_node.mergescore = score
        n1.parent = new_node
        n2.parent = new_node
        new_node.left = n1
        new_node.right = n2

        cmp_nodes = dict([(node.motif, node) for node in nodes if not node.parent])

        if progress:
            # BUGFIX: use a separate variable for the percentage instead of
            # clobbering the 'progress' flag, and integer-divide ('//') for the
            # bar width -- 'str * float' raises TypeError on Python 3.
            pct = (1 - len(cmp_nodes) / float(total)) * 100
            sys.stderr.write(
                "\rClustering [{0}{1}] {2}%".format(
                    "#" * (int(pct) // 10), " " * (10 - int(pct) // 10), int(pct)
                )
            )

        # Score the new averaged motif against all remaining unmerged motifs
        result = mc.get_all_scores([new_node.motif], cmp_nodes.keys(), match, metric, combine, pval, parallel=True)

        for motif, n in cmp_nodes.items():
            x = result[new_node.motif.id][motif.id]
            if pval:
                x = [1 - x[0]] + x[1:]
            scores[(new_node, n)] = x

        nodes.append(new_node)
        cluster_nodes = [node for node in nodes if not node.parent]

    if progress:
        sys.stderr.write("\n")

    # The last node appended is the root of the full tree; leaf nodes then
    # decide (via checkMerge) which subtrees survive the threshold
    root = nodes[-1]
    for node in [node for node in nodes if not node.left]:
        node.parent.checkMerge(root, threshold)

    return root
# Compute a chunk of an all-vs-all motif distance matrix and write it as a
# TSV table. Usage: script.py <pwmfile> <outfile> <chunksize> <chunk> <metric>
# where <chunk> is 1-based and <metric> is one of: wic, seqcor, pcc, ed.
import sys

from gimmemotifs.motif import read_motifs
from gimmemotifs.comparison import seqcor, MotifComparer, _get_all_scores

pwmfile = sys.argv[1]
outfile = sys.argv[2]
chunksize = int(sys.argv[3])
chunk = int(sys.argv[4])
metric = sys.argv[5]

if metric not in ["wic", "seqcor", "pcc", "ed"]:
    raise ValueError("invalid metric {}".format(metric))

# BUGFIX: close the motif file after reading instead of leaking the handle
with open(pwmfile) as f:
    all_motifs = read_motifs(f)

# Select the 1-based chunk of motifs this invocation is responsible for
chunk_motifs = all_motifs[(chunk - 1) * chunksize:chunk * chunksize]

mc = MotifComparer()

# 'pcc' is scored with a 'partial' match; all other metrics use 'total'
if metric == "pcc":
    dists = mc.get_all_scores(chunk_motifs, all_motifs, "partial", metric, "mean", False)
else:
    dists = mc.get_all_scores(chunk_motifs, all_motifs, "total", metric, "mean", False)

# All rows share the same columns; take them from the first entry
cols = list(dists.values())[0]
with open(outfile, "w") as f:
    # Header row: one column per comparison target
    f.write("\t{}\n".format("\t".join(cols)))
    for k, v in dists.items():
        f.write("{}\t{}\n".format(
            k, "\t".join(["{:.6f}".format(v[c][0]) for c in cols])))