def test_repeated_tokens(self):
    """
    Test that when a token is repeated in a document, the returned score is the document frequency, not the term frequency.
    """

    """
    Create the test data.
    """
    tokenizer = Tokenizer(stem=False)
    posts = [
        "After Erdogan's statement, Damascus says Erdogan 'disconnected from reality' after threats",
    ]

    corpus = [ Document(post, tokenizer.tokenize(post)) for post in posts ]

    extractor = TokenExtractor()
    scorer = DFScorer()
    candidates = extractor.extract(corpus, tokenizer=tokenizer)
    scores = scorer.score(candidates, normalize_scores=False)

    # 'erdogan' appears twice in the post, but in only one document
    self.assertEqual(1, scores.get('erdogan'))
def test_score_of_unknown_token(self):
    """
    Test that the score of an unknown token is 0.
    """

    """
    Create the test data.
    """
    tokenizer = Tokenizer(stem=False)
    posts = [
        "Erdogan with threats to attack regime forces 'everywhere' in Syria",
        "Damascus says Erdogan 'disconnected from reality' after threats",
    ]

    corpus = [ Document(post, tokenizer.tokenize(post)) for post in posts ]

    extractor = TokenExtractor()
    scorer = DFScorer()
    candidates = extractor.extract(corpus)
    scores = scorer.score(candidates)
    self.assertFalse(scores.get('unknown'))
def test_max_score(self):
    """
    Test that when normalization is enabled, no score exceeds 1.
    """

    """
    Create the test data.
    """
    tokenizer = Tokenizer(stem=False)
    posts = [
        "Erdogan with threats to attack regime forces 'everywhere' in Syria",
        "Damascus says Erdogan 'disconnected from reality' after threats",
    ]

    corpus = [ Document(post, tokenizer.tokenize(post)) for post in posts ]

    extractor = TokenExtractor()
    scorer = DFScorer()
    candidates = extractor.extract(corpus)
    scores = scorer.score(candidates)
    self.assertTrue(all( score <= 1 for score in scores.values() ))
def test_normalization(self):
    """
    Test that when normalization is disabled, the returned scores are whole-number document frequencies.
    """

    """
    Create the test data.
    """
    tokenizer = Tokenizer(stem=False)
    posts = [
        "Erdogan with threats to attack regime forces 'everywhere' in Syria",
        "After Erdogan's statement, Damascus says Erdogan 'disconnected from reality' after threats",
    ]

    corpus = [ Document(post, tokenizer.tokenize(post)) for post in posts ]

    extractor = TokenExtractor()
    scorer = DFScorer()
    candidates = extractor.extract(corpus)
    scores = scorer.score(candidates, normalize_scores=False)

    # without normalization, the raw document frequencies are whole numbers
    self.assertTrue(all( score == int(score) for score in scores.values() ))
    self.assertEqual(2, scores.get('erdogan'))
def test_score_across_multiple_documents(self):
    """
    Test that the score is based on document frequency, not the total number of mentions.
    """

    """
    Create the test data.
    """
    tokenizer = Tokenizer(stem=False)
    posts = [
        "Erdogan with threats to attack regime forces 'everywhere' in Syria",
        "After Erdogan's statement, Damascus says Erdogan 'disconnected from reality' after threats",
    ]

    corpus = [ Document(post, tokenizer.tokenize(post)) for post in posts ]

    extractor = TokenExtractor()
    scorer = DFScorer()
    candidates = extractor.extract(corpus, tokenizer=tokenizer)
    scores = scorer.score(candidates, normalize_scores=False)

    # 'erdogan' is mentioned three times across the corpus, but in only two documents
    self.assertEqual(2, scores.get('erdogan'))
def test_df_scorer(self):
    """
    Test the basic functionality of the DF scorer.
    """

    """
    Create the test data.
    """
    tokenizer = Tokenizer(stem=False)
    posts = [
        "Erdogan with threats to attack regime forces 'everywhere' in Syria",
        "Damascus says Erdogan 'disconnected from reality' after threats",
    ]

    corpus = [ Document(post, tokenizer.tokenize(post)) for post in posts ]

    extractor = TokenExtractor()
    scorer = DFScorer()
    candidates = extractor.extract(corpus)
    scores = scorer.score(candidates)

    # 'erdogan' and 'threats' appear in both documents, 'damascus' in one of two
    self.assertEqual(1, scores.get('erdogan', 0))
    self.assertEqual(0.5, scores.get('damascus', 0))
    self.assertEqual(1, scores.get('threats', 0))