def get_scores(self, terms):
    """Create a list of scores for each file in the corpus.

    The score = weighted frequency / the total word count in the file.
    This score is computed for each term in the query and summed per file.

    Args:
        terms (list): a list of str query terms

    Returns:
        list: a list of (filename, relevancy score) tuples
    """
    scores = HashTableLinear()
    for term in terms:
        if term in self.term_freqs:
            inner = self.term_freqs[term]
            # inner.hash_table slots hold (filename, frequency) pairs or None
            for entry in inner.hash_table:
                if entry is not None:
                    if entry[0] in scores:
                        scores[entry[0]] += self.get_wf(entry[1])
                    else:
                        scores.put(entry[0], self.get_wf(entry[1]))
    # Normalize by document length and return (filename, score) tuples.
    # Bug fix: the original returned the HashTableLinear itself, while the
    # docstring (and rank()) expect a list of tuples.
    results = []
    for entry in scores.hash_table:
        if entry is None:
            continue
        results.append((entry[0], scores[entry[0]] / self.doc_length[entry[0]]))
    return results
def count_words(self, filename, words):
    """Count words in a file and store the frequency of each word in the
    term_freqs hash table.  Words should not contain stopwords.  Also
    store the total count of words contained in the file in the
    doc_length hash table.

    Args:
        filename (str): the file name
        words (list): a list of words
    """
    from collections import Counter

    # store total count of words in the doc_length hash table
    self.doc_length.put(filename, len(words))
    # Counter tallies every word in one O(n) pass; the original called
    # words.count(word) for every token (O(n^2)) and re-inserted
    # duplicate words repeatedly.  Counter preserves first-occurrence
    # order, so insertion order into term_freqs is unchanged.
    for word, word_frequency in Counter(words).items():
        if self.term_freqs.contains(word):
            # term_freqs[word] appears to return a (key, value) pair;
            # [1] reaches the inner per-document frequency table —
            # NOTE(review): confirm against HashTable.__getitem__
            self.term_freqs[word][1].put(filename, word_frequency)
        else:
            # create a new ("doc", frequency) hashtable for this term
            freq_hashtable = HashTable()
            freq_hashtable.put(filename, word_frequency)
            self.term_freqs.put(word, freq_hashtable)
def count_words(self, filename, words):
    """Record per-document word frequencies and the document length.

    term_freqs is a hash table of hash tables: the outer keys are words,
    the inner keys are file names, and the inner values are the word's
    frequency in that file (self.term_freqs[word][filename]).  Words
    should not contain stopwords.  doc_length accumulates the total
    number of words per file.

    Args:
        filename (str): the file name
        words (list): a list of words
    """
    for word in words:
        if not self.term_freqs.contains(word):
            # First sighting of this word anywhere: start its inner table.
            inner = HashTableLinear()
            inner.put(filename, 1)
            self.term_freqs.put(word, inner)
        elif self.term_freqs[word].contains(filename):
            self.term_freqs[word][filename] += 1
        else:
            self.term_freqs[word].put(filename, 1)
        # Every word counts toward the document's running length.
        if self.doc_length.contains(filename):
            self.doc_length[filename] += 1
        else:
            self.doc_length.put(filename, 1)
def get_scores(self, terms):
    """Create a list of scores for each file in corpus.

    The score = weighted frequency / the total word count in file,
    summed over all query terms.

    Args:
        terms (list): a list of str query terms

    Returns:
        list: a list of (file_path_name, relevancy score) tuples,
        excluding files whose score is 0
    """
    scores = HashTableLinear()
    for term in terms:
        # Bug fix: term_freqs.get raises KeyError for a term that never
        # appears in the corpus, so any unknown query word crashed the
        # search.  Skip such terms instead.
        if not self.term_freqs.contains(term):
            continue
        word_hash_table = self.term_freqs.get(term)
        for file in self.file_list:
            if word_hash_table.contains(file):
                if scores.contains(file):
                    scores[file] += self.get_wf(word_hash_table[file])
                else:
                    scores[file] = self.get_wf(word_hash_table[file])
    # Normalize each accumulated score by the document's word count.
    score_list = []
    for file in self.file_list:
        if scores.contains(file) and scores[file] > 0:
            norm_score = scores[file] / self.doc_length[file]
            score_list.append((file, norm_score))
    return score_list
def search(self, query):
    """Search for the query terms in files.

    Args:
        query (str): query input, "user input goes here"

    Returns:
        list: (filename, score) results in descending order of
        relevancy (the filenames are also printed to the screen)
    """
    # parse words
    filtered_query = self.parse_words([query])
    # remove duplicate words using a hash table keyed by the word itself
    word_table = HashTable()
    for word in filtered_query:
        word_table.put(word, word)
    word_table_keys = word_table.keys()
    parsed_query_terms = []  # changes from string to a list
    # add all words from hash table to list using keys()
    for key in word_table_keys:
        # word_table[key] appears to be a (key, value) pair; [0] is the
        # word — NOTE(review): confirm against HashTable.__getitem__
        parsed_query_terms.append(word_table[key][0])
    # pass query terms to get_scores()
    tuples = self.get_scores(parsed_query_terms)
    # pass resulting list of tuples to rank()
    results = self.rank(tuples)
    # rank's result is displayed in descending order on screen
    for a_tuple in results:
        print(a_tuple[0])
    # Bug fix: the docstring promises a list but the original returned
    # None; return the ranked results as well.
    return results
def __init__(self, directory, stopwords=None):
    """Initialize the index tables and index all files in *directory*.

    Args:
        directory (str): a directory name
        stopwords: a collection of stopwords; defaults to an empty list.
            (Bug fix: the original used a mutable default argument
            ``stopwords=[]``, which is shared across all calls; the
            ``None`` sentinel avoids that while keeping the same
            effective default.)
    """
    if stopwords is None:
        stopwords = []
    self.doc_length = HashTableLinear()  # words per document
    self.doc_freqs = HashTableLinear()  # this will not be used in this assignment
    self.term_freqs = HashTableLinear()
    self.stopwords = stopwords
    # Set before indexing so the attribute exists while files are read.
    self.directory = directory
    self.index_files(directory)
def __init__(self, directory, stopwords):
    """Build the search engine's index over the given directory.

    Args:
        directory (str): a directory name
        stopwords (HashMap): a hash table containing stopwords
    """
    self.stopwords = stopwords
    self.file_list = []
    # doc_length: total words per file; term_freqs: per-term inner
    # tables mapping filename -> frequency.
    self.doc_length = HashTableLinear()
    self.term_freqs = HashTableLinear()
    self.index_files(directory)
def test_linear2(self):
    """put/contains behaviour after inserting 22 chr-keyed entries."""
    table = HashTableLinear()
    for code in range(22):
        table.put(chr(code), code)
    self.assertEqual(table.size(), 22)
    # The table must have resized to keep the load factor bounded.
    self.assertTrue(table.load_factor() <= 0.75)
    for code in (0, 1, 19):
        self.assertTrue(table.contains(chr(code)))
    # chr(22) was never inserted.
    self.assertFalse(table.contains(chr(22)))
def test_linear1(self):
    """put/contains behaviour after inserting 11 string-keyed entries."""
    table = HashTableLinear()
    for value in range(11):
        table.put(str(value), value)
    self.assertEqual(table.size(), 11)
    # Inserting 11 items into the default table forces a rehash.
    self.assertTrue(table.load_factor() <= 0.75)
    for key in ('0', '1', '10'):
        self.assertTrue(table.contains(key))
    # '11' was never inserted.
    self.assertFalse(table.contains('11'))
def test_SE(self):
    """End-to-end checks of SearchEngine built over the local "docs" corpus.

    Depends on the on-disk fixture files (docs\\test.txt,
    docs\\data_structure.txt, ...) and on stop_words.txt; the Windows-style
    path separators suggest this suite was written for Windows —
    NOTE(review): confirm portability if run elsewhere.
    """
    SE = SearchEngine(
        "docs", import_stopwords("stop_words.txt", HashTableLinear()))
    # The fixture directory is expected to contain exactly 4 indexed files.
    self.assertEqual(SE.doc_length.num_items, 4)
    self.assertEqual(SE.stopwords,
                     import_stopwords("stop_words.txt", HashTableLinear()))
    # search returns Pair(filename, score) results in descending relevancy.
    self.assertEqual(
        SE.search("Computer Science")[0], Pair("docs\\test.txt", 1.0))
    self.assertEqual(SE.search("ADT")[0][0], "docs\\data_structure.txt")
    self.assertEqual(round(SE.search("ADT")[0][1], 2), 0.01)
    self.assertEqual(
        SE.search("Hash Table")[1][0], "docs\\data_structure.txt")
    self.assertEqual(round(SE.search("Hash Table")[1][1], 2), 0.01)
    # rank() must sort pairs by score, descending.
    list_of_pairs = [
        Pair("P", 5),
        Pair("A", 2),
        Pair("R", 1),
        Pair("T", 4),
        Pair("H", 3)
    ]
    self.assertEqual(SE.rank(list_of_pairs), [
        Pair("P", 5),
        Pair("T", 4),
        Pair("H", 3),
        Pair("A", 2),
        Pair("R", 1)
    ])
    self.assertEqual(
        SE.get_scores(["computer", "science"])[0],
        Pair("docs\\test.txt", 1.0))
    # Terms absent from the corpus must produce an empty result list.
    self.assertEqual(SE.get_scores(["every", "nothing", "few"]), [])
    # get_wf(6) = 1 + ln(6) ~= 2.79; non-positive frequencies score 0.
    self.assertEqual(round(SE.get_wf(6), 2), 2.79)
    self.assertEqual(SE.get_wf(-6), 0)
    # parse_words lowercases, strips newlines, and drops stopwords.
    list1 = [
        "Automated information retrieval systems of ",
        "Information retrieval and afterwards say\n"
    ]
    list2 = [
        'automated', 'information', 'retrieval', 'systems', 'information',
        'retrieval'
    ]
    self.assertEqual(SE.parse_words(list1), list2)
    self.assertEqual(
        SE.parse_words(["and afterwards say\n", "much without the"]), [])
    self.assertEqual(SE.read_file("docs\\test.txt"), ["computer science\n"])
def test_linear3(self):
    """import_stopwords fills a HashTableLinear with the stop words file."""
    ht = HashTableLinear()
    # Bug fix: the filename must be a string literal; the original passed
    # the bare name stop_words.txt, which raises NameError at run time.
    stop_words = import_stopwords("stop_words.txt", ht)
    # The fixture file is expected to contain 305 stop words.
    self.assertEqual(stop_words.size(), 305)
    self.assertTrue(0.3 <= stop_words.load_factor() <= 0.4)
    self.assertFalse("collision" in stop_words)
    self.assertTrue("very" in stop_words)
    self.assertFalse("linear" in stop_words)
    self.assertTrue("a" in stop_words)
def entry_point(dir_name):
    """Run an interactive search loop over *dir_name*; input 'q' quits.

    Args:
        dir_name (str): directory of documents to index
    """
    stop_words = import_stopwords('stop_words.txt', HashTableLinear())
    engine = SearchEngine(dir_name, stop_words)
    while True:
        query = input('Input Search: ')
        if query == 'q':
            break
        print(engine.search(query))
def build_stopwords(filename):
    """Build and return a hash table of stop words read from a text file.

    Args:
        filename (str): path of stop words file

    Returns:
        the populated hash table of stop words
    """
    return import_stopwords(filename, HashTableLinear())
def test_import_stopwords(self):
    """import_stopwords behaves identically for all three table types."""
    for make_table in (HashTableSepchain, HashTableLinear,
                       HashTableQuadratic):
        stop_table = import_stopwords("stop_words.txt", make_table())
        # Known stopword maps to itself; unknown keys raise KeyError.
        self.assertEqual(stop_table["unless"], "unless")
        self.assertRaises(KeyError, stop_table.get, "Parth")
def test_hash_linear(self):
    """put/get/remove, overwrite, and collision accounting for the
    linear-probing hash table."""
    table = HashTableLinear()
    # A fresh table starts with the default capacity of 11 slots.
    self.assertEqual(table.table_size, 11)
    table["3"] = "3"
    table["2"] = "2"
    table["4"] = "4"
    table["5"] = "5"
    self.assertEqual("5" in table, True)
    self.assertEqual("6" in table, False)
    self.assertRaises(KeyError, table.get, "6")
    # Re-assigning an existing key overwrites its value in place.
    table["3"] = "6"
    self.assertEqual(table["3"], "6")
    # chr(40) is '(' — presumably chosen to collide with an occupied
    # slot under the table's hash function; NOTE(review): collision
    # count of 1 depends on the hash_string implementation.
    table[chr(40)] = "20"
    self.assertEqual(table["3"], "6")
    self.assertEqual(table.num_collisions, 1)
    table.remove("3")
    table.remove("4")
    # Removed keys are gone: get and a second remove both raise.
    self.assertRaises(KeyError, table.get, "4")
    self.assertRaises(KeyError, table.remove, "4")
def test_linear4(self):
    """put/get/remove round trip over 22 chr keys, plus KeyError cases."""
    table = HashTableLinear()
    for code in range(22):
        table.put(chr(code), code)
    self.assertEqual(table.size(), 22)
    self.assertTrue(table.load_factor() <= 0.75)
    for code in (0, 1, 19):
        self.assertEqual(table[chr(code)], code)
    # 'a' (chr 97) was never inserted.
    self.assertRaises(KeyError, table.get, 'a')
    # Removing every key empties the table.
    for code in range(22):
        table.remove(chr(code))
    for code in (0, 1, 19):
        self.assertFalse(table.contains(chr(code)))
    self.assertRaises(KeyError, table.remove, 'a')
def search(self, query):
    """Search for the query terms in files.

    Args:
        query (str): query input: e.g. "computer science"

    Returns:
        list: (files_path_name, score) tuples sorted in descending
        order of relevancy, excluding files whose relevancy score is 0.
    """
    terms = self.parse_words([query])
    # Deduplicate the parsed terms, preserving first-seen order.
    seen = HashTableLinear()
    unique_terms = []
    for term in terms:
        if seen.contains(term):
            continue
        seen.put(term, term)
        unique_terms.append(term)
    return self.rank(self.get_scores(unique_terms))
def main(directory):
    """Interactive search prompt; input 'q' quits.

    Set the location of the files you want the engine to search through
    as the directory parameter.  Entering "s:" re-prompts for a
    multi-word query before searching.
    """
    # Avoid shadowing the builtin `hash` that the original used locally.
    stopword_table = import_stopwords("stop_words.txt", HashTableLinear())
    engine = SearchEngine(directory, stopword_table)
    while True:
        inp = input("Search here:")
        if inp == "q":
            break
        if inp == "s:":
            inp = input("Search multiple things:")
        print(engine.search(inp))
def count_words(self, filename, words):
    """Count words in a file and store the frequency of each word in the
    term_freqs hash table.  Words should not contain stopwords.  Also
    store the total count of words contained in the file in the
    doc_freqs hash table (this class uses doc_freqs for the length).

    Args:
        filename (str): the file name
        words (list): a list of words
    """
    # Bug fix: the original first did term_freqs.put(i, HashTableLinear())
    # for EVERY word, which replaced any inner table created while
    # indexing earlier documents — wiping their frequencies — and made
    # the "word not yet indexed" branches below dead code.  Create the
    # inner table only when the word is genuinely new.
    for word in words:
        if word and word in self.term_freqs:
            if filename in self.term_freqs[word]:
                self.term_freqs[word][filename] += 1
            else:
                self.term_freqs[word].put(filename, 1)
        elif word:  # preserve the original's skip of falsy (empty) words
            self.term_freqs.put(word, HashTableLinear())
            self.term_freqs[word].put(filename, 1)
    # Record the file's total word count once, instead of re-putting 0
    # on every loop iteration as before.
    self.doc_freqs.put(filename, len(words))
def count_words(self, file_path_name, words):
    """Count words in a file and store the frequency of each word in the
    term_freqs hash table.  term_freqs is a hash table of hash tables:
    outer keys are words, inner keys are file names, and inner values
    are the frequencies.  The file's total word count goes into
    doc_length.

    Args:
        file_path_name (str): the file name
        words (list): a list of words
    """
    from collections import Counter

    self.doc_length.put(file_path_name, len(words))
    # Bug fix: the original destructively emptied the caller's `words`
    # list with repeated words.remove(...) while counting with
    # words.count(...) (O(n^2)).  Counter tallies every distinct word in
    # one pass, in first-occurrence order, and leaves `words` intact.
    for current_word, word_freq in Counter(words).items():
        # If the word is already in term_freqs, retrieve its per-document
        # frequency table; otherwise, create a new hash table.
        if current_word in self.term_freqs:
            freq_hash = self.term_freqs.get(current_word)
        else:
            freq_hash = HashTableLinear()
        freq_hash.put(file_path_name, word_freq)
        self.term_freqs.put(current_word, freq_hash)
def get_scores(self, terms):
    """Create a list of scores for each file in corpus.

    The score = weighted frequency / the total word count in the file,
    computed for each term in the query and summed per file.

    Args:
        terms (list): a list of str

    Returns:
        list: a list of Pairs, each containing the filename and its
        relevancy score
    """
    scores = HashTableLinear()
    for query in terms:
        if query in self.term_freqs:
            term_hash = self.term_freqs[query]
            # term_hash.table slots are Pair(filename, frequency) entries
            # or None/empty.
            for i in term_hash.table:
                if i:
                    # Accumulate the weighted frequency per filename.
                    if not i.key in scores:
                        scores[i.key] = 0
                    scores[i.key] += self.get_wf(i.data)
    # Normalize every occupied slot in place by the document length.
    for j in range(len(scores.table)):
        if scores.table[j]:
            scores.table[j].data /= self.doc_length[scores.table[j].key]
        else:
            # Empty slots are overwritten with a Pair(None, 0) sentinel —
            # NOTE(review): presumably so keys() can iterate uniformly;
            # confirm keys() filters out the None keys.
            scores.table[j] = Pair(None, 0)
    # keys() appears to be a module-level helper extracting the Pair
    # entries — TODO confirm its exact return shape.
    return keys(scores)
def main():
    """Prompt repeatedly for query terms and print the relevant files.

    The directory to index is taken from the first command line
    argument.  Queries are entered with an "s:" prefix; entering 'q'
    exits the function and returns None.
    """
    stopwords = import_stopwords("stop_words.txt", HashTableLinear())
    engine = SearchEngine(sys.argv[1], stopwords)
    prompt = ("Type 's:' and what you would like to search for "
              "or type 'q' to exit: ")
    while True:
        reply = input(prompt)
        if reply == "q":
            return
        if "s:" in reply:
            # Drop the two-character prefix, then normalize the query.
            print(engine.search(reply[2:].lower().strip()))
def count_words(self, filename, words):
    """Tally per-document word frequencies into term_freqs and record
    the document's total word count in doc_length.

    Args:
        filename (str): the file name
        words (list): a list of words
    """
    for word in words:
        if word in self.term_freqs:
            inner = self.term_freqs[word]
            if filename in inner:
                inner[filename] += 1
            else:
                inner[filename] = 1
        else:
            # First occurrence of this word anywhere in the corpus.
            fresh = HashTableLinear()
            fresh[filename] = 1
            self.term_freqs[word] = fresh
    self.doc_length.put(filename, len(words))
def count_words(self, filename, words):
    """Count words in a file and store the frequency of each word in the
    term_freqs hash table.  Words should not contain stopwords.  Also
    store the total count of words contained in the file in the
    doc_length hash table.

    Args:
        filename (str): the file name
        words (list): a list of words
    """
    self.doc_length[filename] = len(words)
    for word in words:
        # Lazily create the inner per-document table on first sighting.
        if word not in self.term_freqs:
            self.term_freqs[word] = HashTableLinear()
        inner = self.term_freqs[word]
        if filename in inner:
            inner[filename] += 1
        else:
            inner[filename] = 1
def get_scores(self, terms): """ Creates list of scores for each file in corpus. The score = (weighted frequency / total word count in file) Compute the score for each term in a query and sum all the scores. Args: terms (list): a list of strings, raw input string from user query Returns: list: a list of tuples, each containing the filename and its relevancy score """ # scores = HashMap() score_table = HashTable( ) # contains tuples of (filename, weighted_frequency) for query_term in terms: # fetch a hash table of "term" from self.term_freqs query_term_table = self.term_freqs[query_term][1] # for each file in the hash table, add weighted frequency to scores[file] qt_table_keys = query_term_table.keys() for key in qt_table_keys: # key is a file name weighted_frequency = self.get_wf(query_term_table[key][1]) if weighted_frequency != 0: # if this is the second query_term if score_table.contains(key): # new frequency + old frequency old_freq = score_table[key][1] updated_freq = weighted_frequency + old_freq score_table.put(key, updated_freq) # if score_table[key] is empty, use put (if first query_term) else: score_table.put(key, weighted_frequency) # for each file in scores, do scores[file] /= self.doc_length[file] score_table_keys = score_table.keys() score_list = [] for key in score_table_keys: # key is a filename normalized_score = score_table[key][1] / self.doc_length[key][1] score_table[key] = normalized_score score_list.append(score_table[key]) # return scores, which is a list of tuples neglecting terms with frequencies of 0 return score_list
def main():
    """Interactive driver: build a SearchEngine on demand and run queries.

    Prompts for a directory once, then loops on commands: 'q' exits,
    's' builds the engine (rebuilt on every 's') and runs one search.
    """
    # execute unit tests
    directory = input("please enter a directory name\n")
    yeet = True
    while yeet:
        command = input("press q to exit\n"
                        "press s to search\n"
                        "What would you like to do?\n")
        if command == "q":
            break
        elif command == "s":
            # NOTE(review): the engine is re-built (re-indexing the whole
            # directory) on every 's' command — confirm this is intended.
            search = SearchEngine(
                directory,
                import_stopwords("stop_words.txt", HashTableLinear()))
        else:
            print("that is not a valid command\n")
            continue
        new_query = [input("what would you like to search?\n")]
        # NOTE(review): the query is parsed here AND search() receives the
        # resulting list — other SearchEngine.search variants expect a raw
        # string and parse internally; confirm this double-parse is correct.
        query_string = search.parse_words(new_query)
        search.search(query_string)
def __init__(self, directory, stopwords):
    """Set up the index tables and index every file in *directory*.

    Args:
        directory (str): a directory name
        stopwords (HashTable): a hash table containing stopwords
    """
    self.stopwords = stopwords
    self.doc_length = HashTable()
    # Kept for interface completeness; not used in this assignment.
    self.doc_freqs = HashTable()
    self.term_freqs = HashTable()
    self.index_files(directory)
def __init__(self, directory, stopwords):
    """Initialize the inverted-index tables and index *directory*.

    Args:
        directory (str): a directory name
        stopwords: hash table of stopwords to exclude while indexing
    """
    self.stopwords = stopwords
    # doc_length: words per file; term_freqs: per-term frequency tables.
    self.doc_length = HashTableLinear()
    self.term_freqs = HashTableLinear()
    self.index_files(directory)
class SearchEngine:
    """Builds and maintains an inverted index of documents stored in a
    specified directory and provides functionality to search the
    documents with query terms.

    Attributes:
        directory (str): a directory name
        stopwords (HashTable): contains stopwords
        doc_length (HashTable): contains the number of words in each document
        doc_freqs (HashTable): contains the number of documents containing
            the term, for each term (not used in this assignment)
        term_freqs (HashTable): hash table of hash tables; for each term,
            the inner hash table maps document names (keys) to the
            frequency of the term in that document (values)
    """

    def __init__(self, directory, stopwords):
        """Create the index tables, then index every file in *directory*.

        Args:
            directory (str): a directory name
            stopwords (HashTable): a hash table containing stopwords
        """
        self.doc_length = HashTable()
        self.doc_freqs = HashTable()  # this will not be used in this assignment
        self.term_freqs = HashTable()
        self.stopwords = stopwords
        self.index_files(directory)

    # PREPROCESSING ================================================================================

    def read_file(self, infile):
        """A helper function to read a file.

        Args:
            infile (str): the path to a file

        Returns:
            list: a list of line strings read from the file
        """
        # open file
        with open(infile, "r") as filepointer:
            lines = filepointer.readlines(
            )  # looks like ["line 1 here", "line 2 here"]
        return lines

    def parse_words(self, lines):
        """Split strings into words, convert words to lower case and
        remove newline characters, excluding stopwords.

        Args:
            lines (list): a list of strings

        Returns:
            list: a list of words
        """
        raw_words = []
        for line in lines:
            split_line = line.split(
                " ")  # split line looks like ["line", "1", "here"]
            raw_words.extend(split_line)
        # Keep only non-stopwords.  NOTE(review): the stopword test runs
        # BEFORE rstrip()/lower(), so a word carrying a trailing newline
        # or upper-case letters is compared in raw form — confirm intended.
        filtered_words = [
            word.rstrip().lower() for word in raw_words
            if word not in self.stopwords
        ]
        return filtered_words

    def count_words(self, filename, words):
        """Count words in a file and store the frequency of each word in
        the term_freqs hash table.  Words should not contain stopwords.
        Also store the total count of words contained in the file in the
        doc_length hash table.

        Args:
            filename (str): the file name
            words (list): a list of words
        """
        # store total count of words in the doc_length hash table
        self.doc_length.put(filename, len(words))
        # iterate through each word
        for word in words:
            # frequency of this word in this document; words.count is an
            # O(n) scan per token, so this loop is O(n^2) overall
            word_frequency = words.count(
                word)  # returns number of occurences of this word in words
            # if word is already in term_freqs
            if self.term_freqs.contains(word):
                # term_freqs[word] appears to return a (key, value) pair;
                # [1] reaches the inner per-document table — NOTE(review):
                # confirm against HashTable.__getitem__
                self.term_freqs[word][1].put(filename, word_frequency)
            # if word is not already in term_freqs
            else:
                # create new frequency hashtable for this term ("doc1", freq)
                freq_hashtable = HashTable()
                freq_hashtable.put(filename, word_frequency)
                # put this newly created hash table into term_freqs
                self.term_freqs.put(word, freq_hashtable)

    def index_files(self, directory):
        """Index all text files in a given directory.

        Args:
            directory (str): the path of a directory
        """
        # get a list of files in the directory
        file_list = os.listdir(directory)
        # for each item in file_list, item is a filename
        for item in file_list:
            # construct full path of each file
            path = os.path.join(directory, item)
            # if item is not a file (or the stopword list), skip it
            if not os.path.isfile(path) or item == "stop_words.txt":
                continue
            # split path into file extension and the rest
            parts = os.path.splitext(
                item)  # maybe change item stuff here to path
            # only process text files
            if parts[1] == ".txt":
                # read, parse, and count the file's words
                item_lines = self.read_file(path)
                item_words = self.parse_words(item_lines)
                self.count_words(path, item_words)

    # SEARCHING ====================================================================================

    def get_wf(self, term_frequency):
        """Compute the weighted frequency: 1 + ln(frequency) when the
        frequency is positive, 0 otherwise.

        Args:
            term_frequency (float): term frequency

        Returns:
            float: the weighted frequency
        """
        if term_frequency > 0:
            weighted_freq = 1 + math.log(term_frequency)
        else:
            weighted_freq = 0
        return weighted_freq

    def get_scores(self, terms):
        """Create a list of scores for each file in the corpus.

        The score = (weighted frequency / total word count in file),
        computed for each term in a query and summed per file.

        Args:
            terms (list): a list of strings, raw input string from user query

        Returns:
            list: a list of tuples, each containing the filename and its
            relevancy score
        """
        # scores = HashMap()
        score_table = HashTable(
        )  # contains tuples of (filename, weighted_frequency)
        for query_term in terms:
            # fetch the inner hash table of "term"; [1] assumes a
            # (key, value) pair — NOTE(review): an unindexed query term
            # would raise KeyError here; confirm callers only pass
            # indexed terms
            query_term_table = self.term_freqs[query_term][1]
            # for each file in the hash table, add weighted frequency
            qt_table_keys = query_term_table.keys()
            for key in qt_table_keys:  # key is a file name
                weighted_frequency = self.get_wf(query_term_table[key][1])
                if weighted_frequency != 0:
                    # if this is the second (or later) query_term
                    if score_table.contains(key):
                        # new frequency + old frequency
                        old_freq = score_table[key][1]
                        updated_freq = weighted_frequency + old_freq
                        score_table.put(key, updated_freq)
                    # if score_table[key] is empty, use put (first term)
                    else:
                        score_table.put(key, weighted_frequency)
        # for each file in scores, do scores[file] /= doc_length[file]
        score_table_keys = score_table.keys()
        score_list = []
        for key in score_table_keys:  # key is a filename
            normalized_score = score_table[key][1] / self.doc_length[key][1]
            score_table[key] = normalized_score
            # appends the stored entry — presumably a (filename, score)
            # pair, given the [1] indexing above; TODO confirm
            score_list.append(score_table[key])
        # return scores: list of tuples, zero-frequency terms neglected
        return score_list

    def rank(self, scores):
        """Rank files in the descending order of relevancy.

        Args:
            scores (list): list of tuples of (filename, score)

        Returns:
            list: the (filename, score) tuples sorted in descending
            order of score
        """
        return sorted(scores, key=lambda x: x[1], reverse=True)

    def search(self, query):
        """Search for the query terms in files.

        Args:
            query (str): query input, "user input goes here"

        Returns:
            None: filenames are printed in descending order of relevancy
            — NOTE(review): nothing is returned despite the list the
            original docstring promised; confirm callers don't use the
            return value
        """
        # parse words
        filtered_query = self.parse_words([query])
        # remove duplicate words using a hash table
        word_table = HashTable()
        for word in filtered_query:
            word_table.put(word, word)
        word_table_keys = word_table.keys()
        parsed_query_terms = []  # changes from string to a list
        # add all words from hash table to list using keys()
        for key in word_table_keys:
            parsed_query_terms.append(word_table[key][0])
        # pass query terms to get_scores()
        tuples = self.get_scores(parsed_query_terms)
        # pass resulting list of tuples to rank()
        results = self.rank(tuples)
        # rank's result is displayed in descending order on screen
        for a_tuple in results:
            print(a_tuple[0])
def test_HashTableLinear(self):
    """End-to-end put/get/contains/remove behaviour of HashTableLinear."""
    table = HashTableLinear()
    # Fresh table is empty and lookups on missing keys raise.
    self.assertEqual(table.size(), 0)
    self.assertFalse(table.contains('us'))
    self.assertRaises(KeyError, table.get, 'us')
    # Single insert is visible via get, [], and contains.
    table.put('us', 'us')
    self.assertEqual(table.get('us'), 'us')
    self.assertEqual(table['us'], 'us')
    self.assertTrue(table.contains('us'))
    self.assertFalse(table.contains('say'))
    self.assertEqual(table.size(), 1)
    self.assertEqual(table.collisions(), 0)
    # Second insert collides once with the first under the hash function.
    table.put('say', 'say')
    self.assertEqual(table.get('say'), 'say')
    self.assertTrue(table.contains('say'))
    self.assertEqual(table.size(), 2)
    self.assertEqual(table.collisions(), 1)
    # Removal only affects the removed key.
    table.remove('say')
    self.assertFalse(table.contains('say'))
    self.assertTrue(table.contains('us'))
    table.remove('us')
    self.assertEqual(table.size(), 0)
    # Re-inserting after emptying works as usual.
    table.put('us', 'us')
    table.put('say', 'say')
    table.put('the', 'the')
    self.assertTrue(table.contains('us'))
    self.assertTrue(table.contains('the'))