def test_memory_usage(self):
    import tracemalloc
    import inspect
    from corpus import InMemoryDocument, InMemoryCorpus
    from suffixarray import SuffixArray
    corpus = InMemoryCorpus()
    corpus.add_document(InMemoryDocument(0, {"a": "o o\n\n\no\n\no", "b": "o o\no \no"}))
    corpus.add_document(InMemoryDocument(1, {"a": "ba", "b": "b bab"}))
    corpus.add_document(InMemoryDocument(2, {"a": "o o O o", "b": "o o"}))
    corpus.add_document(InMemoryDocument(3, {"a": "oO" * 10000, "b": "o"}))
    corpus.add_document(InMemoryDocument(4, {"a": "cbab o obab O ", "b": "o o " * 10000}))
    tracemalloc.start()
    snapshot1 = tracemalloc.take_snapshot()
    engine = SuffixArray(corpus, ["a", "b"], self._normalizer, self._tokenizer)
    snapshot2 = tracemalloc.take_snapshot()
    tracemalloc.stop()
    for statistic in snapshot2.compare_to(snapshot1, "filename"):
        if statistic.traceback[0].filename == inspect.getfile(SuffixArray):
            self.assertLessEqual(statistic.size_diff, 2000000, "Memory usage seems excessive.")

def test_access_documents(self):
    from corpus import InMemoryDocument, InMemoryCorpus
    corpus = InMemoryCorpus()
    corpus.add_document(InMemoryDocument(0, {"body": "this is a Test"}))
    corpus.add_document(InMemoryDocument(1, {"title": "prØve", "body": "en to tre"}))
    self.assertEqual(corpus.size(), 2)
    self.assertListEqual([d.document_id for d in corpus], [0, 1])
    self.assertListEqual([corpus[i].document_id for i in range(0, corpus.size())], [0, 1])
    self.assertListEqual([corpus.get_document(i).document_id for i in range(0, corpus.size())], [0, 1])

def assignment_a():

    # Use these throughout below.
    normalizer = BrainDeadNormalizer()
    tokenizer = BrainDeadTokenizer()

    # Dump postings for a dummy two-document corpus.
    print("INDEXING...")
    corpus = InMemoryCorpus()
    corpus.add_document(InMemoryDocument(0, {"body": "this is a Test"}))
    corpus.add_document(InMemoryDocument(1, {"body": "test TEST prØve"}))
    index = InMemoryInvertedIndex(corpus, ["body"], normalizer, tokenizer)
    for (term, expected) in zip(index.get_terms("PRøvE wtf tesT"), [[(1, 1)], [], [(0, 1), (1, 2)]]):
        print(term)
        assert term in ["prøve", "wtf", "test"]
        postings = list(index.get_postings_iterator(term))
        for posting in postings:
            print(posting)
        assert len(postings) == len(expected)
        assert [(p.document_id, p.term_frequency) for p in postings] == expected
    print(index)

    # Again, for a slightly bigger corpus.
    print("LOADING...")
    corpus = InMemoryCorpus("data/mesh.txt")
    print("INDEXING...")
    index = InMemoryInvertedIndex(corpus, ["body"], normalizer, tokenizer)
    for (term, expected_length) in [("hydrogen", 8), ("hydrocephalus", 2)]:
        print(term)
        for posting in index.get_postings_iterator(term):
            print(posting)
        assert len(list(index.get_postings_iterator(term))) == expected_length

    # Test that we merge posting lists correctly. Note the implicit test for case- and whitespace-robustness.
    print("MERGING...")
    merger = PostingsMerger()
    and_query = ("HIV pROtein", "AND", [11316, 11319, 11320, 11321])
    or_query = ("water Toxic", "OR",
                [3078, 8138, 8635, 9379, 14472, 18572, 23234, 23985] + [i for i in range(25265, 25282)])
    for (query, operator, expected_document_ids) in [and_query, or_query]:
        print(re.sub(r"\W+", " " + operator + " ", query))
        terms = list(index.get_terms(query))
        assert len(terms) == 2
        postings = [index.get_postings_iterator(terms[i]) for i in range(len(terms))]
        merged = {"AND": merger.intersection, "OR": merger.union}[operator](postings[0], postings[1])
        documents = [corpus.get_document(posting.document_id) for posting in merged]
        print(*documents, sep="\n")
        assert len(documents) == len(expected_document_ids)
        assert [d.get_document_id() for d in documents] == expected_document_ids

def test_multiple_fields(self):
    from corpus import InMemoryDocument, InMemoryCorpus
    from suffixarray import SuffixArray
    corpus = InMemoryCorpus()
    corpus.add_document(InMemoryDocument(0, {"field1": "a b c", "field2": "b c d"}))
    corpus.add_document(InMemoryDocument(1, {"field1": "x", "field2": "y"}))
    corpus.add_document(InMemoryDocument(2, {"field1": "y", "field2": "z"}))
    engine0 = SuffixArray(corpus, ["field1", "field2"], self._normalizer, self._tokenizer)
    engine1 = SuffixArray(corpus, ["field1"], self._normalizer, self._tokenizer)
    engine2 = SuffixArray(corpus, ["field2"], self._normalizer, self._tokenizer)
    self._process_query_and_verify_winner(engine0, "b c", [0], 2)
    self._process_query_and_verify_winner(engine0, "y", [1, 2], 1)
    self._process_query_and_verify_winner(engine1, "x", [1], 1)
    self._process_query_and_verify_winner(engine1, "y", [2], 1)
    self._process_query_and_verify_winner(engine1, "z", [], None)
    self._process_query_and_verify_winner(engine2, "z", [2], 1)

def assignment_a_postingsmerger_1():

    # A small but real corpus.
    normalizer = BrainDeadNormalizer()
    tokenizer = BrainDeadTokenizer()
    corpus = InMemoryCorpus("./data/mesh.txt")
    index = InMemoryInvertedIndex(corpus, ["body"], normalizer, tokenizer)

    # Test that we merge posting lists correctly. Note the implicit test for case- and whitespace-robustness.
    print("MERGING...")
    merger = PostingsMerger()
    and_query = ("HIV pROtein", "AND", [11316, 11319, 11320, 11321])
    or_query = ("water Toxic", "OR",
                [3078, 8138, 8635, 9379, 14472, 18572, 23234, 23985] + [i for i in range(25265, 25282)])
    for (query, operator, expected_document_ids) in [and_query, or_query]:
        print(re.sub(r"\W+", " " + operator + " ", query))
        terms = list(index.get_terms(query))
        assert len(terms) == 2
        postings = [index[terms[i]] for i in range(len(terms))]
        merged = {"AND": merger.intersection, "OR": merger.union}[operator](postings[0], postings[1])
        documents = [corpus[posting.document_id] for posting in merged]
        print(*documents, sep="\n")
        assert len(documents) == len(expected_document_ids)
        assert [d.document_id for d in documents] == expected_document_ids

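# The merges exercised above combine posting iterators that are assumed to be sorted by
# document id. For reference, here is a minimal sketch of how such an intersection ("AND")
# merge could look under that sorted-order assumption; it is only an illustration, not the
# course's actual PostingsMerger implementation.
def _intersection_sketch(iterator1, iterator2):
    posting1 = next(iterator1, None)
    posting2 = next(iterator2, None)
    while posting1 is not None and posting2 is not None:
        if posting1.document_id == posting2.document_id:
            # Same document occurs in both lists: emit it and advance both sides.
            yield posting1
            posting1 = next(iterator1, None)
            posting2 = next(iterator2, None)
        elif posting1.document_id < posting2.document_id:
            posting1 = next(iterator1, None)
        else:
            posting2 = next(iterator2, None)
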
def main():
    import os.path
    from normalization import BrainDeadNormalizer
    from tokenization import ShingleGenerator
    from corpus import InMemoryCorpus
    from invertedindex import InMemoryInvertedIndex
    from ranking import BrainDeadRanker
    from searchengine import SimpleSearchEngine
    print("Indexing MeSH corpus...")
    normalizer = BrainDeadNormalizer()
    tokenizer = ShingleGenerator(3)
    corpus = InMemoryCorpus(os.path.join(data_path, 'mesh.txt'))
    index = InMemoryInvertedIndex(corpus, ["body"], normalizer, tokenizer)
    ranker = BrainDeadRanker()
    engine = SimpleSearchEngine(corpus, index)
    options = {"debug": False, "hit_count": 5, "match_threshold": 0.5}
    print("Enter a query and find matching documents.")
    print(f"Lookup options are {options}.")
    print(f"Tokenizer is {tokenizer.__class__.__name__}.")
    print(f"Ranker is {ranker.__class__.__name__}.")

    def evaluator(query):
        matches = []
        engine.evaluate(query, options, ranker, lambda m: matches.append(m))
        return matches

    simple_repl("query", evaluator)

def test_multiple_fields(self):
    from corpus import InMemoryDocument, InMemoryCorpus
    from invertedindex import InMemoryInvertedIndex
    document = InMemoryDocument(0, {
        'felt1': 'Dette er en test. Test, sa jeg. TEST!',
        'felt2': 'test er det',
        'felt3': 'test TEsT',
    })
    corpus = InMemoryCorpus()
    corpus.add_document(document)
    index = InMemoryInvertedIndex(corpus, ['felt1', 'felt3'], self._normalizer, self._tokenizer)
    posting = next(index.get_postings_iterator('test'))
    self.assertEqual(posting.document_id, 0)
    self.assertEqual(posting.term_frequency, 5)

def main():
    import os.path
    from normalization import BrainDeadNormalizer
    from tokenization import BrainDeadTokenizer
    from corpus import InMemoryCorpus
    from ahocorasick import Trie, StringFinder
    print("Building trie from MeSH corpus...")
    normalizer = BrainDeadNormalizer()
    tokenizer = BrainDeadTokenizer()
    corpus = InMemoryCorpus(os.path.join(data_path, 'mesh.txt'))
    dictionary = Trie()
    for document in corpus:
        dictionary.add(normalizer.normalize(normalizer.canonicalize(document["body"])), tokenizer)
    engine = StringFinder(dictionary, tokenizer)
    print("Enter some text and locate words and phrases that are MeSH terms.")

    def evaluator(text):
        matches = []
        engine.scan(normalizer.normalize(normalizer.canonicalize(text)), lambda m: matches.append(m))
        return matches

    simple_repl("text", evaluator)

def test_mesh_terms_in_cran_corpus(self):
    import os.path
    from corpus import InMemoryCorpus
    from ahocorasick import Trie, StringFinder
    mesh = InMemoryCorpus(os.path.join(data_path, 'mesh.txt'))
    cran = InMemoryCorpus(os.path.join(data_path, 'cran.xml'))
    trie = Trie()
    for d in mesh:
        trie.add(d["body"] or "", self._tokenizer)
    finder = StringFinder(trie, self._tokenizer)
    self._scan_buffer_verify_matches(finder, cran[0]["body"], ["wing", "wing"])
    self._scan_buffer_verify_matches(finder, cran[3]["body"], ["solutions", "skin", "friction"])
    self._scan_buffer_verify_matches(finder, cran[1254]["body"], ["electrons", "ions"])

def assignment_b_stringfinder():

    # Use these throughout below.
    tokenizer = BrainDeadTokenizer()
    results = []

    # Simple test of using a trie-encoded dictionary for efficiently locating substrings in a buffer.
    trie = Trie()
    for s in ["romerike", "apple computer", "norsk", "norsk ørret", "sverige", "ørret", "banan"]:
        trie.add(s, tokenizer)
    finder = StringFinder(trie, tokenizer)
    buffer = "det var en gang en norsk ørret fra romerike som likte abba fra sverige"
    print("SCANNING...")
    results.clear()
    finder.scan(buffer, lambda m: results.append(m))
    print("Buffer \"" + buffer + "\" contains", results)
    assert [m["match"] for m in results] == ["norsk", "norsk ørret", "ørret", "romerike", "sverige"]

    # Find all MeSH terms that occur verbatim in some selected Cranfield documents! Since MeSH
    # documents are medical terms and the Cranfield documents have technical content, the
    # overlap probably isn't that big.
    print("LOADING...")
    mesh = InMemoryCorpus("data/mesh.txt")
    cranfield = InMemoryCorpus("data/cran.xml")
    print("BUILDING...")
    trie = Trie()
    for d in mesh:
        trie.add(d["body"] or "", tokenizer)
    finder = StringFinder(trie, tokenizer)
    print("SCANNING...")
    for (document_id, expected_matches) in [(0, ["wing", "wing"]),
                                            (3, ["solutions", "skin", "friction"]),
                                            (1254, ["electrons", "ions"])]:
        document = cranfield.get_document(document_id)
        buffer = document["body"] or ""
        results.clear()
        finder.scan(buffer, lambda m: results.append(m))
        print("Cranfield document", document, "contains MeSH terms", results)
        assert [m["match"] for m in results] == expected_matches

def assignment_a_inverted_index_3():

    # Tests that multiple fields are handled correctly.
    normalizer = BrainDeadNormalizer()
    tokenizer = BrainDeadTokenizer()
    doc = InMemoryDocument(document_id=0, fields={
        'felt 1': 'Dette er en test. Test, sa jeg. TEST!',
        'felt 2': 'test er det',
        'felt 3': 'test TEsT',
    })
    corpus = InMemoryCorpus()
    corpus.add_document(doc)
    index = InMemoryInvertedIndex(corpus, ['felt 1', 'felt 3'], normalizer, tokenizer)
    p = next(index.get_postings_iterator('test'))
    print(f"term-freq: {p.term_frequency} (correct is 5)")
    assert p.document_id == 0
    assert p.term_frequency == 5

def test_mesh_corpus(self):
    import os.path
    from corpus import InMemoryCorpus
    from invertedindex import InMemoryInvertedIndex
    corpus = InMemoryCorpus(os.path.join(data_path, 'mesh.txt'))
    index = InMemoryInvertedIndex(corpus, ["body"], self._normalizer, self._tokenizer)
    self.assertEqual(len(list(index["hydrogen"])), 8)
    self.assertEqual(len(list(index["hydrocephalus"])), 2)

def test_cran_corpus(self):
    import os.path
    from corpus import InMemoryCorpus
    from suffixarray import SuffixArray
    corpus = InMemoryCorpus(os.path.join(data_path, 'cran.xml'))
    engine = SuffixArray(corpus, ["body"], self._normalizer, self._tokenizer)
    self._process_query_and_verify_winner(engine, "visc", [328], 11)
    self._process_query_and_verify_winner(engine, "Of A", [946], 10)
    self._process_query_and_verify_winner(engine, "", [], None)
    self._process_query_and_verify_winner(engine, "approximate solution", [159, 1374], 3)

def assignment_a_inverted_index_1():

    # Use these throughout below.
    normalizer = BrainDeadNormalizer()
    tokenizer = BrainDeadTokenizer()

    # Dump postings for a dummy two-document corpus.
    print("INDEXING...")
    corpus = InMemoryCorpus()
    corpus.add_document(InMemoryDocument(0, {"body": "this is a Test"}))
    corpus.add_document(InMemoryDocument(1, {"body": "test TEST prØve"}))
    index = InMemoryInvertedIndex(corpus, ["body"], normalizer, tokenizer)
    for (term, expected) in zip(index.get_terms("PRøvE wtf tesT"), [[(1, 1)], [], [(0, 1), (1, 2)]]):
        print(term)
        assert term in ["prøve", "wtf", "test"]
        postings = list(index[term])
        for posting in postings:
            print(posting)
        assert len(postings) == len(expected)
        assert [(p.document_id, p.term_frequency) for p in postings] == expected
    print(index)

    # Document counts should be correct.
    assert index.get_document_frequency("wtf") == 0
    assert index.get_document_frequency("test") == 2
    assert index.get_document_frequency("prøve") == 1

def test_synthetic_corpus(self):
    from itertools import product, combinations_with_replacement
    from corpus import InMemoryDocument, InMemoryCorpus
    from invertedindex import InMemoryInvertedIndex
    from searchengine import SimpleSearchEngine
    corpus = InMemoryCorpus()
    words = ("".join(term) for term in product("bcd", "aei", "jkl"))
    texts = (" ".join(word) for word in combinations_with_replacement(words, 3))
    for text in texts:
        corpus.add_document(InMemoryDocument(corpus.size(), {"a": text}))
    engine = SimpleSearchEngine(corpus, InMemoryInvertedIndex(corpus, ["a"], self._normalizer, self._tokenizer))
    epsilon = 0.0001
    self._process_query_verify_matches("baj BAJ baj", engine,
                                       {"match_threshold": 1.0, "hit_count": 27},
                                       (27, 9.0, [0]))
    self._process_query_verify_matches("baj caj", engine,
                                       {"match_threshold": 1.0, "hit_count": 100},
                                       (27, None, None))
    self._process_query_verify_matches("baj caj daj", engine,
                                       {"match_threshold": 2/3 + epsilon, "hit_count": 100},
                                       (79, None, None))
    self._process_query_verify_matches("baj caj", engine,
                                       {"match_threshold": 2/3 + epsilon, "hit_count": 100},
                                       (100, 3.0, [0, 9, 207, 2514]))
    self._process_query_verify_matches("baj cek dil", engine,
                                       {"match_threshold": 1.0, "hit_count": 10},
                                       (1, 3.0, [286]))
    self._process_query_verify_matches("baj cek dil", engine,
                                       {"match_threshold": 1.0, "hit_count": 10},
                                       (1, None, None))
    self._process_query_verify_matches("baj cek dil", engine,
                                       {"match_threshold": 2/3 + epsilon, "hit_count": 80},
                                       (79, 3.0, [13, 26, 273, 286, 377, 3107, 3198]))
    self._process_query_verify_matches("baj xxx yyy", engine,
                                       {"match_threshold": 2/3 + epsilon, "hit_count": 100},
                                       (0, None, None))
    self._process_query_verify_matches("baj xxx yyy", engine,
                                       {"match_threshold": 2/3 - epsilon, "hit_count": 100},
                                       (100, None, None))

def test_mesh_corpus(self):
    import os.path
    from corpus import InMemoryCorpus
    from invertedindex import InMemoryInvertedIndex
    from searchengine import SimpleSearchEngine
    corpus = InMemoryCorpus(os.path.join(data_path, 'mesh.txt'))
    index = InMemoryInvertedIndex(corpus, ["body"], self._normalizer, self._tokenizer)
    engine = SimpleSearchEngine(corpus, index)
    query = "polluTION Water"
    self._process_two_term_query_verify_matches(query, engine,
                                                {"match_threshold": 0.1, "hit_count": 10},
                                                (10, [25274, 25275, 25276]))
    self._process_two_term_query_verify_matches(query, engine,
                                                {"match_threshold": 1.0, "hit_count": 10},
                                                (3, [25274, 25275, 25276]))

def assignment_a_inverted_index_2():

    # Use these throughout below.
    normalizer = BrainDeadNormalizer()
    tokenizer = BrainDeadTokenizer()

    # Dump postings for a slightly bigger corpus.
    print("LOADING...")
    corpus = InMemoryCorpus("./data/mesh.txt")
    print("INDEXING...")
    index = InMemoryInvertedIndex(corpus, ["body"], normalizer, tokenizer)
    for (term, expected_length) in [("hydrogen", 8), ("hydrocephalus", 2)]:
        print(term)
        for posting in index[term]:
            print(posting)
        assert len(list(index[term])) == expected_length

def test_shingled_mesh_corpus(self):
    import os.path
    from tokenization import ShingleGenerator
    from corpus import InMemoryCorpus
    from invertedindex import InMemoryInvertedIndex
    from searchengine import SimpleSearchEngine
    tokenizer = ShingleGenerator(3)
    corpus = InMemoryCorpus(os.path.join(data_path, 'mesh.txt'))
    index = InMemoryInvertedIndex(corpus, ["body"], self._normalizer, tokenizer)
    engine = SimpleSearchEngine(corpus, index)
    self._process_query_verify_matches("orGAnik kEMmistry", engine,
                                       {"match_threshold": 0.1, "hit_count": 10},
                                       (10, 8.0, [4408, 4410, 4411, 16980, 16981]))
    self._process_query_verify_matches("synndrome", engine,
                                       {"match_threshold": 0.1, "hit_count": 10},
                                       (10, 7.0, [1275]))

def assignment_c_simplesearchengine_1():

    # Use these throughout below.
    normalizer = BrainDeadNormalizer()
    tokenizer = BrainDeadTokenizer()

    # Load and index MeSH terms.
    print("LOADING...")
    corpus = InMemoryCorpus("../data/mesh.txt")
    print("INDEXING...")
    inverted_index = InMemoryInvertedIndex(corpus, ["body"], normalizer, tokenizer)

    # Do ranked retrieval, using a simple ranker.
    engine = SimpleSearchEngine(corpus, inverted_index)
    simple_ranker = BrainDeadRanker()
    results = []

    # Callback for receiving matches.
    def match_collector(match):
        results.append(match)
        print("*** WINNER", match["score"], match["document"])

    query = "polluTION Water"
    for match_threshold in [0.1, 1.0]:
        print(f"SEARCHING for '{query}' with match threshold {match_threshold}...")
        results.clear()
        options = {"match_threshold": match_threshold, "hit_count": 10, "debug": False}
        engine.evaluate(query, options, simple_ranker, match_collector)
        assert len(results) == {0.1: 10, 1.0: 3}[match_threshold]
        for (score, document_id) in [(match["score"], match["document"].document_id) for match in results[:3]]:
            assert score == 2.0  # Both 'pollution' and 'water'.
            assert document_id in [25274, 25275, 25276]
        for score in [match["score"] for match in results[3:]]:
            assert score == 1.0  # Only 'pollution' or 'water', but not both.

def assignment_d_betterranker():

    # Use these throughout below.
    normalizer = BrainDeadNormalizer()
    tokenizer = BrainDeadTokenizer()
    results = []
    hit_count = 10

    # Callback for receiving matches.
    def match_collector(match):
        results.append(match)
        print("*** WINNER", match["score"], match["document"])

    # Load and index some English news sentences. Look at the output and compare the two rankers!
    # The naive ranker assigns equal weight to all words (including stopwords), whereas the improved
    # ranker does not. The test below for the improved ranker (with document #24 being the winner)
    # assumes a straightforward implementation of a TF-IDF ranking scheme as described in the
    # textbook.
    print("LOADING...")
    corpus = InMemoryCorpus("data/en.txt")
    print("INDEXING...")
    inverted_index = InMemoryInvertedIndex(corpus, ["body"], normalizer, tokenizer)
    simple_ranker = BrainDeadRanker()
    better_ranker = BetterRanker(corpus, inverted_index)
    engine = SimpleSearchEngine(corpus, inverted_index)
    for query in ["the terrorism attack and obama"]:
        options = {"match_threshold": 0.1, "hit_count": hit_count, "debug": False}
        for ranker in [simple_ranker, better_ranker]:
            print("SEARCHING for '" + query + "' using " + ranker.__class__.__name__ + "...")
            results.clear()
            engine.evaluate(query, options, ranker, match_collector)
            winner_document_ids = {simple_ranker: [9221, 7263], better_ranker: [24]}[ranker]
            assert 0 < len(results) <= hit_count
            assert results[0]["document"].document_id in winner_document_ids

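# The BetterRanker comparison above leans on the textbook's tf-idf weighting. As a point of
# reference, a minimal sketch of that weighting is given below; the exact variant used by the
# course's BetterRanker (log base, smoothing, normalization) is an assumption here and may differ.
import math

def _tf_idf_weight_sketch(term_frequency, document_frequency, document_count):
    # Log-dampened term frequency times inverse document frequency: terms that are frequent
    # in the document but rare in the corpus get the highest weight.
    if term_frequency == 0 or document_frequency == 0:
        return 0.0
    return (1.0 + math.log10(term_frequency)) * math.log10(document_count / document_frequency)
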
def main():
    import os.path
    from normalization import BrainDeadNormalizer
    from tokenization import BrainDeadTokenizer
    from corpus import InMemoryCorpus
    from naivebayesclassifier import NaiveBayesClassifier
    print("Initializing naive Bayes classifier from news corpora...")
    normalizer = BrainDeadNormalizer()
    tokenizer = BrainDeadTokenizer()
    languages = ["en", "no", "da", "de"]
    training_set = {language: InMemoryCorpus(os.path.join(data_path, f"{language}.txt")) for language in languages}
    classifier = NaiveBayesClassifier(training_set, ["body"], normalizer, tokenizer)
    print(f"Enter some text and classify it into {languages}.")
    print("Returned scores are log-probabilities.")

    def evaluator(text):
        results = []
        classifier.classify(text, lambda m: results.append(m))
        return results

    simple_repl("text", evaluator)

def test_language_detection_trained_on_some_news_corpora(self):
    import os.path
    from corpus import InMemoryCorpus
    from naivebayesclassifier import NaiveBayesClassifier
    training_set = {
        language: InMemoryCorpus(os.path.join(data_path, f"{language}.txt"))
        for language in ["en", "no", "da", "de"]
    }
    classifier = NaiveBayesClassifier(training_set, ["body"], self._normalizer, self._tokenizer)
    self._classify_buffer_and_verify_top_categories(
        "Vil det riktige språket identifiseres? Dette er bokmål.", classifier, ["no"])
    self._classify_buffer_and_verify_top_categories(
        "I don't believe that the number of tokens exceeds a billion.", classifier, ["en"])
    self._classify_buffer_and_verify_top_categories(
        "De danske drenge drikker snaps!", classifier, ["da"])
    self._classify_buffer_and_verify_top_categories(
        "Der Kriminalpolizei! Haben sie angst?", classifier, ["de"])

def assignment_b_suffixarray_1():

    # Use these throughout below.
    normalizer = BrainDeadNormalizer()
    tokenizer = BrainDeadTokenizer()

    # Prepare for some suffix array lookups.
    print("LOADING...")
    corpus = InMemoryCorpus("data/cran.xml")
    print("INDEXING...")
    engine = SuffixArray(corpus, ["body"], normalizer, tokenizer)
    results = []
    hit_count = 5

    # Callback for receiving matches.
    def match_collector(match):
        results.append(match)
        print("*** WINNER", match["score"], match["document"])

    # Define the actual test queries.
    test1 = ("visc", 11, [328])                       # Look for {'viscous', 'viscosity', ...}.
    test2 = ("Of A", 10, [946])                       # Test robustness for case and whitespace.
    test3 = ("", 0, [])                               # Safety feature: Match nothing instead of everything.
    test4 = ("approximate solution", 3, [1374, 159])  # Multiple winners.

    # Test that the simple occurrence ranking works. Be robust towards how ties are resolved.
    for (query, winner_score, winner_document_ids) in [test1, test2, test3, test4]:
        print("SEARCHING for '" + query + "'...")
        results.clear()
        engine.evaluate(query, {"debug": False, "hit_count": hit_count}, match_collector)
        if winner_document_ids:
            assert results[0]["score"] == winner_score
            assert results[0]["document"].document_id in winner_document_ids
            assert len(results) <= hit_count
        else:
            assert len(results) == 0

def assignment_d_shinglegenerator_2():

    # Use these throughout below.
    normalizer = BrainDeadNormalizer()
    tokenizer = ShingleGenerator(3)
    ranker = BrainDeadRanker()
    results = []
    hit_count = 10

    # Load MeSH terms.
    print("LOADING...")
    corpus = InMemoryCorpus("data/mesh.txt")

    # Do ranked retrieval, using n-grams (shingles) and a simple ranker. This allows for fuzzy retrieval.
    print("INDEXING...")
    inverted_index = InMemoryInvertedIndex(corpus, ["body"], normalizer, tokenizer)
    engine = SimpleSearchEngine(corpus, inverted_index)

    # Callback for receiving matches.
    def match_collector(match):
        results.append(match)
        print("*** WINNER", match["score"], match["document"])

    # Test with some misspelled queries. Be robust towards arbitrary resolution of ties.
    for (query, winner_score, winner_document_ids) in [
            ("orGAnik kEMmistry", 8.0, [16981, 16980, 4411, 4410, 4408]),
            ("synndrome", 7.0, [1275])]:
        print("SEARCHING for '" + query + "'...")
        results.clear()
        options = {"match_threshold": 0.1, "hit_count": hit_count, "debug": False}
        engine.evaluate(query, options, ranker, match_collector)
        assert 0 < len(results) <= hit_count
        assert results[0]["score"] == winner_score
        assert results[0]["document"].document_id in winner_document_ids

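# To make the fuzzy matching above concrete: a width-3 shingle tokenizer turns a misspelled
# query like "synndrome" into overlapping character 3-grams, many of which also occur in the
# correctly spelled index term. The sketch below only illustrates the general idea; the
# course's ShingleGenerator may handle padding, casing, and short strings differently.
def _shingles_sketch(text, width=3):
    if len(text) <= width:
        return [text]
    return [text[i:i + width] for i in range(len(text) - width + 1)]

assert _shingles_sketch("synndrome") == ["syn", "ynn", "nnd", "ndr", "dro", "rom", "ome"]
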
def main():
    import os.path
    from normalization import BrainDeadNormalizer
    from tokenization import BrainDeadTokenizer
    from corpus import InMemoryCorpus
    from suffixarray import SuffixArray
    print("Building suffix array from Cranfield corpus...")
    normalizer = BrainDeadNormalizer()
    tokenizer = BrainDeadTokenizer()
    corpus = InMemoryCorpus(os.path.join(data_path, 'cran.xml'))
    engine = SuffixArray(corpus, ["body"], normalizer, tokenizer)
    options = {"debug": False, "hit_count": 5}
    print("Enter a prefix phrase query and find matching documents.")
    print(f"Lookup options are {options}.")
    print("Returned scores are occurrence counts.")

    def evaluator(query):
        matches = []
        engine.evaluate(query, options, lambda m: matches.append(m))
        return matches

    simple_repl("query", evaluator)

def main():
    import os.path
    from normalization import BrainDeadNormalizer
    from tokenization import BrainDeadTokenizer
    from corpus import InMemoryCorpus
    from invertedindex import InMemoryInvertedIndex
    print("Building inverted index from Cranfield corpus...")
    normalizer = BrainDeadNormalizer()
    tokenizer = BrainDeadTokenizer()
    corpus = InMemoryCorpus(os.path.join(data_path, 'cran.xml'))
    index = InMemoryInvertedIndex(corpus, ["body"], normalizer, tokenizer)
    print("Enter one or more index terms and inspect their posting lists.")

    def evaluator(terms):
        terms = index.get_terms(terms)
        return {term: list(index.get_postings_iterator(term)) for term in terms}

    simple_repl("terms", evaluator)

def assignment_e_naivebayes_1():

    # Use these throughout below. These are really language-specific functions, so it's a huge
    # simplification to use these for a language identifier.
    normalizer = BrainDeadNormalizer()
    tokenizer = BrainDeadTokenizer()
    results = []

    # Callback for receiving results. Received scores are log-probabilities.
    def match_collector(match: dict):
        results.append(match)
        print("*** WINNER", match["score"], match["category"])

    # Use this as the training set for our language identifier.
    print("LOADING...")
    training_set = {language: InMemoryCorpus("data/" + language + ".txt")
                    for language in ["en", "no", "da", "de"]}

    # Assess probabilities from the training set.
    print("TRAINING...")
    classifier = NaiveBayesClassifier(training_set, ["body"], normalizer, tokenizer)

    # Classify some previously unseen text fragments.
    print("CLASSIFYING...")
    for (buffer, language) in [
            ("Mon tro om det riktige språket identifiseres? Dette er norsk bokmål, forøvrig.", "no"),
            ("I don't believe that the number of tokens exceeds a billion.", "en"),
            ("De danske drenge drikker snaps!", "da"),
            ("Der Kriminalpolizei! Haben sie angst?", "de")]:
        print(buffer)
        results.clear()
        classifier.classify(buffer, match_collector)
        assert results[0]["category"] == language

def test_access_postings(self):
    from corpus import InMemoryDocument, InMemoryCorpus
    from invertedindex import InMemoryInvertedIndex
    corpus = InMemoryCorpus()
    corpus.add_document(InMemoryDocument(0, {"body": "this is a Test"}))
    corpus.add_document(InMemoryDocument(1, {"body": "test TEST prØve"}))
    index = InMemoryInvertedIndex(corpus, ["body"], self._normalizer, self._tokenizer)
    self.assertListEqual(list(index.get_terms("PRøvE wtf tesT")), ["prøve", "wtf", "test"])
    self.assertListEqual([(p.document_id, p.term_frequency) for p in index["prøve"]], [(1, 1)])
    self.assertListEqual([(p.document_id, p.term_frequency) for p in index.get_postings_iterator("wtf")], [])
    self.assertListEqual([(p.document_id, p.term_frequency) for p in index["test"]], [(0, 1), (1, 2)])
    self.assertEqual(index.get_document_frequency("wtf"), 0)
    self.assertEqual(index.get_document_frequency("prøve"), 1)
    self.assertEqual(index.get_document_frequency("test"), 2)

def test_china_example_from_textbook(self):
    import math
    from corpus import InMemoryDocument, InMemoryCorpus
    from naivebayesclassifier import NaiveBayesClassifier
    china = InMemoryCorpus()
    china.add_document(InMemoryDocument(0, {"body": "Chinese Beijing Chinese"}))
    china.add_document(InMemoryDocument(1, {"body": "Chinese Chinese Shanghai"}))
    china.add_document(InMemoryDocument(2, {"body": "Chinese Macao"}))
    not_china = InMemoryCorpus()
    not_china.add_document(InMemoryDocument(0, {"body": "Tokyo Japan Chinese"}))
    training_set = {"china": china, "not china": not_china}
    classifier = NaiveBayesClassifier(training_set, ["body"], self._normalizer, self._tokenizer)
    results = []
    classifier.classify("Chinese Chinese Chinese Tokyo Japan", lambda m: results.append(m))
    self.assertEqual(len(results), 2)
    self.assertEqual(results[0]["category"], "china")
    self.assertAlmostEqual(math.exp(results[0]["score"]), 0.0003, 4)
    self.assertEqual(results[1]["category"], "not china")
    self.assertAlmostEqual(math.exp(results[1]["score"]), 0.0001, 4)

def assignment_e_naivebayes_2():

    # Use these throughout below. These are really language-specific functions, so it's a huge
    # simplification to use these for a language identifier.
    normalizer = BrainDeadNormalizer()
    tokenizer = BrainDeadTokenizer()
    results = []

    # Callback for receiving results. Received scores are log-probabilities.
    def match_collector(match: dict):
        results.append(match)
        print("*** WINNER", match["score"], match["category"])

    # Replicate Example 13.1 on pages 241 and 242 in the textbook.
    china = InMemoryCorpus()
    china.add_document(InMemoryDocument(0, {"body": "Chinese Beijing Chinese"}))
    china.add_document(InMemoryDocument(1, {"body": "Chinese Chinese Shanghai"}))
    china.add_document(InMemoryDocument(2, {"body": "Chinese Macao"}))
    not_china = InMemoryCorpus()
    not_china.add_document(InMemoryDocument(0, {"body": "Tokyo Japan Chinese"}))
    training_set = {"china": china, "not china": not_china}
    classifier = NaiveBayesClassifier(training_set, ["body"], normalizer, tokenizer)
    buffer = "Chinese Chinese Chinese Tokyo Japan"
    print(buffer)
    results.clear()
    classifier.classify(buffer, match_collector)
    assert len(results) == 2
    assert results[0]["category"] == "china"
    assert results[1]["category"] == "not china"
    assert math.isclose(math.exp(results[0]["score"]), 0.0003, abs_tol=0.00001)
    assert math.isclose(math.exp(results[1]["score"]), 0.0001, abs_tol=0.00005)

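# Where the expected values above come from, worked out by hand: Example 13.1 in the textbook
# uses a multinomial naive Bayes model with add-one (Laplace) smoothing over the six-term
# vocabulary {Chinese, Beijing, Shanghai, Macao, Tokyo, Japan}. The "china" class has 8
# training tokens and the "not china" class has 3, so e.g. P(Chinese | china) = (5 + 1) / (8 + 6) = 3/7
# and P(Tokyo | china) = P(Japan | china) = 1/14. The unnormalized posteriors for the test
# document "Chinese Chinese Chinese Tokyo Japan" are then:
p_china = (3 / 4) * (3 / 7) ** 3 * (1 / 14) * (1 / 14)    # ~0.0003
p_not_china = (1 / 4) * (2 / 9) ** 3 * (2 / 9) * (2 / 9)  # ~0.0001
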