Example #1
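Each example below is a test method excerpted from a unittest test case, so some shared scaffolding is assumed but not shown. A minimal sketch of that scaffolding follows; the import paths are hypothetical, since the examples only show the class names:

import math
import unittest

# Hypothetical import paths; adjust to wherever Tokenizer, Document,
# TokenExtractor and LogTFScorer live in your project.
from nlp.tokenizer import Tokenizer
from nlp.document import Document
from apd.extractors.local import TokenExtractor
from apd.scorers.local import LogTFScorer

class TestLogTFScorer(unittest.TestCase):
    """The test methods shown in the examples below belong here."""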
    def test_max_score(self):
        """
        Test that the maximum score is 1 when normalization is enabled.
        """
        """
        Create the test data.
        """
        tokenizer = Tokenizer(stem=False)
        posts = [
            "Erdogan with threats to attack regime forces 'everywhere' in Syria",
            "Damascus says Erdogan 'disconnected from reality' after threats",
        ]

        corpus = [Document(post, tokenizer.tokenize(post)) for post in posts]

        extractor = TokenExtractor()
        scorer = LogTFScorer()
        candidates = extractor.extract(corpus)
        scores = scorer.score(candidates)
        self.assertTrue(all(score <= 1 for score in scores.values()))
Example #2
    def test_score_of_unknown_token(self):
        """
        Test that the score of an unknown token is 0.
        """
        """
        Create the test data.
        """
        tokenizer = Tokenizer(stem=False)
        posts = [
            "Erdogan with threats to attack regime forces 'everywhere' in Syria",
            "Damascus says Erdogan 'disconnected from reality' after threats",
        ]

        corpus = [Document(post, tokenizer.tokenize(post)) for post in posts]

        extractor = TokenExtractor()
        scorer = LogTFScorer()
        candidates = extractor.extract(corpus)
        scores = scorer.score(candidates)
        self.assertFalse(scores.get('unknown'))  # an unknown token has no entry, so get() returns a falsy None
Example #3
    def test_repeated_tokens(self):
        """
        Test that when a token is repeated within a document, every occurrence counts towards its term frequency.
        """
        """
        Create the test data.
        """
        tokenizer = Tokenizer(stem=False)
        posts = [
            "After Erdogan's statement, Damascus says Erdogan 'disconnected from reality' after threats",
        ]

        corpus = [Document(post, tokenizer.tokenize(post)) for post in posts]

        extractor = TokenExtractor(tokenizer=tokenizer)
        scorer = LogTFScorer()
        candidates = extractor.extract(corpus)
        scores = scorer.score(candidates, normalize_scores=False)
        self.assertEqual(math.log(2 + 1, 10),
                         scores.get('erdogan'))  # apply Laplace smoothing
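Here 'erdogan' is extracted twice from the single post ("Erdogan's" and "Erdogan"), so the unnormalized score is log(2 + 1, 10) ≈ 0.477. If the scorer used document frequency instead, the score would be log(1 + 1, 10) ≈ 0.301, since there is only one document.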
Example #4
    def test_logarithm_base(self):
        """
        Test that when a logarithmic base is provided, it is used instead of the default base.
        """
        """
        Create the test data.
        """
        tokenizer = Tokenizer(stem=False)
        posts = [
            "Erdogan with threats to attack regime forces 'everywhere' in Syria",
            "After Erdogan's statement, Damascus says Erdogan 'disconnected from reality' after threats",
        ]

        corpus = [Document(post, tokenizer.tokenize(post)) for post in posts]

        extractor = TokenExtractor()
        scorer = LogTFScorer(base=2)
        candidates = extractor.extract(corpus)
        scores = scorer.score(candidates, normalize_scores=False)
        self.assertEqual(math.log(2 + 1, 2),
                         scores.get('erdogan'))  # apply Laplace smoothing
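With base=2, the unnormalized score is log(2 + 1, 2) ≈ 1.585; with the default base of 10 the same frequency would score log(2 + 1, 10) ≈ 0.477.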
Example #5
    def test_normalization(self):
        """
        Test that when normalization is disabled, the returned score is the logarithm of the smoothed raw frequency.
        """
        """
        Create the test data.
        """
        tokenizer = Tokenizer(stem=False)
        posts = [
            "Erdogan with threats to attack regime forces 'everywhere' in Syria",
            "After Erdogan's statement, Damascus says Erdogan 'disconnected from reality' after threats",
        ]

        corpus = [Document(post, tokenizer.tokenize(post)) for post in posts]

        extractor = TokenExtractor()
        scorer = LogTFScorer()
        candidates = extractor.extract(corpus)
        scores = scorer.score(candidates, normalize_scores=False)
        self.assertEqual(math.log(2 + 1, 10),
                         scores.get('erdogan'))  # apply Laplace smoothing
Example #6
    def test_score_across_multiple_documents(self):
        """
        Test that the term frequency accumulates across all documents in the corpus.
        """
        """
        Create the test data.
        """
        tokenizer = Tokenizer(stem=False)
        posts = [
            "Erdogan with threats to attack regime forces 'everywhere' in Syria",
            "After Erdogan's statement, Damascus says Erdogan 'disconnected from reality' after threats",
        ]

        corpus = [Document(post, tokenizer.tokenize(post)) for post in posts]

        extractor = TokenExtractor(tokenizer=tokenizer)
        scorer = LogTFScorer()
        candidates = extractor.extract(corpus)
        scores = scorer.score(candidates, normalize_scores=False)
        self.assertEqual(math.log(3 + 1, 10),
                         scores.get('erdogan'))  # apply Laplace smoothing
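'erdogan' appears once in the first post and twice in the second ("Erdogan's" and "Erdogan"), so the accumulated term frequency is 3 and the unnormalized score is log(3 + 1, 10) ≈ 0.602.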
Example #7
    def test_log_tf_scorer(self):
        """
        Test the basic functionality of the logarithmic TF scorer.
        """
        """
        Create the test data.
        """
        tokenizer = Tokenizer(stem=False)
        posts = [
            "Erdogan with threats to attack regime forces 'everywhere' in Syria",
            "Damascus says Erdogan 'disconnected from reality' after threats",
        ]

        corpus = [Document(post, tokenizer.tokenize(post)) for post in posts]

        extractor = TokenExtractor()
        scorer = LogTFScorer()
        candidates = extractor.extract(corpus)
        scores = scorer.score(candidates)
        self.assertEqual(1, scores.get('erdogan', 0))
        self.assertEqual(
            math.log(1 + 1, 10) / math.log(2 + 1, 10),
            scores.get('damascus', 0))
        self.assertEqual(1, scores.get('threats', 0))
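Taken together, the assertions pin down the scorer's behaviour: it sums each candidate token's term frequency across all documents, applies Laplace smoothing by adding 1 before taking the logarithm (base 10 unless another base is given), and, unless normalize_scores=False is passed, divides every score by the maximum so the top token scores exactly 1. The following is a minimal sketch reconstructed from those assertions, not the library's actual implementation; it assumes the extractor yields one list of tokens per document:

import math

class LogTFScorer:
    """
    A logarithmic term-frequency scorer reconstructed from the
    assertions above; a sketch, not the library's actual code.
    """

    def __init__(self, base=10):
        self.base = base

    def score(self, candidates, normalize_scores=True):
        # Accumulate the term frequency of each token across all
        # documents; repeated occurrences all count (see Example #3).
        freq = { }
        for tokens in candidates:
            for token in tokens:
                freq[token] = freq.get(token, 0) + 1

        # Laplace smoothing: log(f + 1) keeps a frequency of 0 at 0.
        scores = { token: math.log(f + 1, self.base)
                   for token, f in freq.items() }

        # Rescale by the maximum score so the top token scores exactly 1
        # (see Examples #1 and #7).
        if normalize_scores and scores:
            top = max(scores.values())
            scores = { token: s / top for token, s in scores.items() }

        return scores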