def approx_pattern_count(text, pattern, d):
    """Count occurrences of `pattern` in `text` with at most `d` mismatches.

    Slides a window of len(pattern) over `text` and counts every fragment
    whose Hamming distance to `pattern` is <= d.

    :param text: string to scan
    :param pattern: pattern whose approximate occurrences are counted
    :param d: maximum number of allowed mismatches (inclusive)
    :return: number of matching windows
    """
    fragment_length = len(pattern)
    # Sum over a generator: the original materialized a throwaway list and
    # carried an unused enumerate index.
    return sum(
        1
        for fragment in ("".join(chars) for chars in window(text, fragment_length))
        if hamming_distance(fragment, pattern) <= d
    )
def approx_pattern_count(text, pattern, d):
    """Return how many length-len(pattern) fragments of `text` lie within
    Hamming distance `d` of `pattern`.

    :param text: string to scan
    :param pattern: pattern to match approximately
    :param d: maximum mismatch count (inclusive)
    :return: count of approximate occurrences
    """
    k = len(pattern)
    count = 0
    # Iterate the window generator directly: the original built a full list
    # first and bound an enumerate index it never used.
    for chars in window(text, k):
        # window() yields character tuples; join back into a string to compare
        if hamming_distance("".join(chars), pattern) <= d:
            count += 1
    return count
def enumerate_motifs(dnas, k, d):
    """Motif enumeration: return every k-mer that occurs with at most `d`
    mismatches in *each* string of `dnas`.

    Every k-mer found in any input string is expanded into its
    d-neighbourhood; each neighbour is kept only if it approximately occurs
    in every string.

    :param dnas: iterable of DNA strings
    :param k: motif length
    :param d: maximum mismatch count (inclusive)
    :return: list of (k, d)-motifs, in arbitrary order
    """
    # Hoist the per-string fragment lists out of the candidate loops: the
    # original rebuilt them for every candidate k-mer (triple-nested work).
    fragments_per_text = [["".join(x) for x in window(text, k)] for text in dnas]

    # Deduplicate seed k-mers so each neighbourhood is generated only once.
    kmers = set()
    for fragments in fragments_per_text:
        kmers.update(fragments)

    resulting_set = set()
    for kmer in kmers:
        for modified_kmer in generate_words_with_mismatches(kmer, d):
            if modified_kmer in resulting_set:
                continue  # already accepted; skip the expensive scan
            # Accept only candidates that approximately occur in every string.
            if all(
                any(
                    hamming_distance(fragment, modified_kmer) <= d
                    for fragment in fragments
                )
                for fragments in fragments_per_text
            ):
                resulting_set.add(modified_kmer)
    return list(resulting_set)
def distance(kmer, string, k):
    """Return the minimum Hamming distance between `kmer` and any
    length-`k` fragment of `string`.

    :param kmer: query k-mer
    :param string: string whose windows are compared against `kmer`
    :param k: window length (assumed equal to len(kmer))
    :return: smallest distance found, or k + 2 if `string` yields no window
    """
    # min() with a default preserves the original k + 2 sentinel for strings
    # shorter than k; hamming_distance is now evaluated once per fragment
    # instead of twice on every improvement.
    return min(
        (hamming_distance(kmer, "".join(x)) for x in window(string, k)),
        default=k + 2,
    )
def distance(kmer, string, k):
    """Smallest Hamming distance from `kmer` to any k-length window of
    `string`; falls back to k + 2 when no window exists.

    :param kmer: query k-mer
    :param string: string to scan
    :param k: window length (assumed equal to len(kmer))
    :return: minimum distance, or the k + 2 sentinel for too-short input
    """
    best = k + 2  # sentinel strictly larger than any real distance (max is k)
    for chars in window(string, k):
        # Cache the distance: the original called hamming_distance twice
        # (once in the comparison, once in the assignment).
        dist = hamming_distance(kmer, "".join(chars))
        if dist < best:
            best = dist
    return best
from _01_07_hamming_distance import hamming_distance
from _01_02_frequent_words import window

if __name__ == '__main__':
    # Approximate pattern matching: report every 0-based start index in
    # `text` where `pattern` occurs with at most `d` mismatches.
    with open('in.txt', 'r') as f:
        pattern = f.readline().strip()
        text = f.readline().strip()
        d = int(f.readline())  # int() tolerates the trailing newline

    fragment_length = len(pattern)
    # Comprehension replaces the manual append loop; same indexes, same order.
    indexes = [
        i
        for i, fragment in enumerate(
            "".join(x) for x in window(text, fragment_length)
        )
        if hamming_distance(fragment, pattern) <= d
    ]

    with open('out.txt', 'w') as f:
        f.write(' '.join(map(str, indexes)))
from _01_07_hamming_distance import hamming_distance
from _01_02_frequent_words import window

if __name__ == '__main__':
    # Read pattern, text and mismatch budget d from in.txt, then write the
    # 0-based start offsets of approximate matches to out.txt.
    with open('in.txt', 'r') as f:
        pattern = f.readline().strip()
        text = f.readline().strip()
        d = int(f.readline())  # int() tolerates the trailing newline

    k = len(pattern)
    # Build the index list in one comprehension instead of append-in-a-loop.
    indexes = [
        pos
        for pos, frag in enumerate("".join(w) for w in window(text, k))
        if hamming_distance(frag, pattern) <= d
    ]

    with open('out.txt', 'w') as f:
        f.write(' '.join(map(str, indexes)))