Code Example #1
    def test_sorting(self):
        """
        Test that the resolver sorts the tokens in descending order of score.
        """
        """
        Create the test data
        """
        tokenizer = Tokenizer(min_length=3, stem=False, case_fold=True)
        posts = [
            "Manchester United falter against Tottenham Hotspur",
            "Manchester United unable to avoid defeat to Tottenham",
            "Tottenham lose again",
        ]
        corpus = [Document(post, tokenizer.tokenize(post)) for post in posts]

        candidates = TokenExtractor().extract(corpus)
        scores = TFScorer().score(candidates)
        scores = ThresholdFilter(0).filter(scores)
        resolved, unresolved = TokenResolver(tokenizer, corpus).resolve(scores)
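        # 'tottenham' appears in all three posts, 'manchester' and 'united' in two,
        # and the remaining tokens in only one, so they should resolve in that order.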
        self.assertEqual('tottenham', resolved[0])
        self.assertEqual(set(['manchester', 'united']), set(resolved[1:3]))
        self.assertEqual(
            set([
                'falter', 'against', 'hotspur', 'unable', 'avoid', 'defeat',
                'lose', 'again'
            ]), set(resolved[3:]))
Code Example #2
    def test_extrapolate_returns_related_participants(self):
        """
        Test that when extrapolating, related participants are returned.
        """
        """
        Create the test data
        """
        tokenizer = Tokenizer(stem=True,
                              stopwords=list(stopwords.words("english")))
        posts = [
            "The LigaPro is the second-highest division of the Portuguese football league system.",
        ]
        corpus = [Document(post, tokenizer.tokenize(post)) for post in posts]
        extrapolator = WikipediaExtrapolator(corpus,
                                             tokenizer,
                                             TF(),
                                             first_level_links=15,
                                             second_level_links=15)
        participants = extrapolator.extrapolate([
            'Associação Académica de Coimbra – O.A.F.',
            'Académico de Viseu F.C.', 'S.L. Benfica B', 'FC Porto B'
        ])

        other_participants = [
            'Casa Pia A.C.', 'G.D. Chaves', 'C.D. Cova da Piedade',
            'S.C. Covilhã', 'G.D. Estoril Praia', 'S.C. Farense',
            'C.D. Feirense', 'Leixões S.C.', 'C.D. Mafra', 'C.D. Nacional',
            'U.D. Oliveirense', 'F.C. Penafiel', 'Varzim S.C.',
            'U.D. Vilafranquense'
        ]
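        # At least four of these related clubs should be discovered by following
        # Wikipedia links from the seed participants.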
        self.assertGreaterEqual(
            len(set(participants).intersection(set(other_participants))), 4)
Code Example #3
    def test_sorting(self):
        """
        Test that the resolver sorts the tokens in descending order of score.
        """
        """
        Create the test data
        """
        tokenizer = Tokenizer(min_length=3, stem=False, case_fold=True)
        posts = [
            "Manchester United falter against Tottenham Hotspur",
            "Manchester United unable to avoid defeat to Tottenham",
            "Tottenham lose again",
        ]
        corpus = [Document(post, tokenizer.tokenize(post)) for post in posts]
        """
        Ensure that the more common candidates are ranked towards the beginning.
        """
        candidates = TokenExtractor().extract(corpus)
        scores = TFScorer().score(candidates)
        scores = ThresholdFilter(0).filter(scores)
        self.assertTrue(scores)
        resolved, unresolved = Resolver().resolve(scores)
        self.assertEqual(set(scores.keys()), set(resolved))
        self.assertEqual([], unresolved)
        self.assertEqual('tottenham', resolved[0])
        self.assertEqual(set(['manchester', 'united']), set(resolved[1:3]))
Code Example #4
    def test_threshold_filter(self):
        """
        Test the basic functionality of the threshold filter.
        """

        """
        Create the test data.
        """
        tokenizer = Tokenizer(stem=False)
        posts = [
            "Erdogan with threats to attack regime forces 'everywhere' in Syria",
            "Damascus says Erdogan 'disconnected from reality' after threats",
        ]

        corpus = [ Document(post, tokenizer.tokenize(post)) for post in posts ]

        extractor = EntityExtractor()
        scorer = TFScorer()
        filter = ThresholdFilter(0.75)

        candidates = extractor.extract(corpus)
        scores = scorer.score(candidates)
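        # 'erdogan' appears in both posts (score 1) and 'damascus' in only one (score 0.5),
        # so the 0.75 threshold should retain only 'erdogan'.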

        self.assertEqual(1, scores.get('erdogan', 0))
        self.assertEqual(0.5, scores.get('damascus', 0))

        scores = filter.filter(scores)
        self.assertTrue('erdogan' in scores)
        self.assertFalse('damascus' in scores)
Code Example #5
    def test_zero_threshold(self):
        """
        Test that when a threshold of zero is given, all candidate participants are retained.
        """

        """
        Create the test data.
        """
        tokenizer = Tokenizer(stem=False)
        posts = [
            "Erdogan with threats to attack regime forces 'everywhere' in Syria",
            "Damascus says Erdogan 'disconnected from reality' after threats",
        ]

        corpus = [ Document(post, tokenizer.tokenize(post)) for post in posts ]

        extractor = EntityExtractor()
        scorer = TFScorer()
        filter = ThresholdFilter(0)

        candidates = extractor.extract(corpus)
        scores = scorer.score(candidates)

        self.assertEqual(1, scores.get('erdogan', 0))
        self.assertEqual(0.5, scores.get('damascus', 0))

        scores = filter.filter(scores)
        self.assertTrue('erdogan' in scores)
        self.assertTrue('damascus' in scores)
Code Example #6
    def test_score_relevance(self):
        """
        Test that when two documents are provided, one more relevant than the other, the score reflects it.
        """

        tokenizer = Tokenizer(min_length=2, stem=True)
        candidate = "Ronaldo"
        candidate_document = Document(candidate, tokenizer.tokenize(candidate))
        text = "Ronaldo, speaking after Juventus' victory, says Serie A is still wide open"
        domain = Document(text, tokenizer.tokenize(text))

        title_1 = "Cristiano Ronaldo"
        text_1 = "Cristiano Ronaldo is a Portuguese professional footballer who plays as a forward for Serie A club Juventus."
        title_document_1 = Document(title_1, tokenizer.tokenize(title_1))
        sentence_document_1 = Document(text_1, tokenizer.tokenize(text_1))

        title_2 = "Ronaldo"
        text_2 = "Ronaldo is a Brazilian former professional footballer who played as a striker."
        title_document_2 = Document(title_2, tokenizer.tokenize(title_2))
        sentence_document_2 = Document(text_2, tokenizer.tokenize(text_2))

        resolver = WikipediaSearchResolver(TF(), Tokenizer(), 0, [])
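        # The first article shares 'Juventus' and 'Serie A' with the domain text,
        # so it should score as more relevant than the second, unrelated article.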
        score_1 = resolver._compute_score(candidate_document, title_document_1,
                                          domain, sentence_document_1)
        score_2 = resolver._compute_score(candidate_document, title_document_2,
                                          domain, sentence_document_2)
        self.assertGreater(score_1, score_2)
Code Example #7
    def test_year_check_range(self):
        """
        Test that when checking for a year in a range, the function returns `True`.
        """

        article = '2019–20 Premier League'
        extrapolator = WikipediaExtrapolator([], Tokenizer(), TF())
        self.assertTrue(extrapolator._has_year(article))

        article = '2019-20 Premier League'
        extrapolator = WikipediaExtrapolator([], Tokenizer(), TF())
        self.assertTrue(extrapolator._has_year(article))
Code Example #8
    def test_year_check_range(self):
        """
        Test that when checking for a year in a range, the function returns `True`.
        """

        article = '2019–20 Premier League'
        resolver = WikipediaSearchResolver(TF(), Tokenizer(), 0, [])
        self.assertTrue(resolver._has_year(article))

        article = '2019-20 Premier League'
        resolver = WikipediaSearchResolver(TF(), Tokenizer(), 0, [])
        self.assertTrue(resolver._has_year(article))
Code Example #9
    def test_extract_empty_tweet(self):
        """
        Test that the TwitterNER entity extractor returns no candidates from an empty tweet.
        """
        """
        Create the test data.
        """
        tokenizer = Tokenizer(stem=False)
        posts = [""]
        corpus = [Document(post, tokenizer.tokenize(post)) for post in posts]

        extractor = TwitterNEREntityExtractor()
        candidates = extractor.extract(corpus)
        self.assertEqual(1, len(candidates))
        self.assertEqual([], candidates[0])
Code Example #10
    def test_multiple_sentences(self):
        """
        Test that the entity extractor is capable of extracting named entities from multiple sentences.
        """
        """
        Create the test data.
        """
        tokenizer = Tokenizer(stem=False)
        posts = [
            "The downward spiral continues for Lyon. Bruno Genesio under threat.",
        ]
        corpus = [Document(post, tokenizer.tokenize(post)) for post in posts]

        extractor = EntityExtractor()
        candidates = extractor.extract(corpus)
        self.assertEqual(set(["lyon", "bruno genesio"]), set(candidates[0]))
Code Example #11
    def test_named_entity_at_end(self):
        """
        Test that the entity extractor is capable of extracting named entities at the end of a sentence.
        """
        """
        Create the test data.
        """
        tokenizer = Tokenizer(stem=False)
        posts = [
            "Spiral continues for Lyon",
        ]
        corpus = [Document(post, tokenizer.tokenize(post)) for post in posts]

        extractor = EntityExtractor()
        candidates = extractor.extract(corpus)
        self.assertTrue("lyon" in set(candidates[0]))
Code Example #12
    def test_extract_multiword_entities(self):
        """
        Test that the TwitterNER entity extractor is capable of extracting multi-word entities.
        """
        """
        Create the test data.
        """
        tokenizer = Tokenizer(stem=False)
        posts = [
            "Lyon were delivered by Karl Toko Ekambi",
        ]
        corpus = [Document(post, tokenizer.tokenize(post)) for post in posts]

        extractor = TwitterNEREntityExtractor()
        candidates = extractor.extract(corpus)
        self.assertEqual(['lyon', 'karl toko ekambi'], candidates[0])
Code Example #13
    def test_edge_centrality_multiple(self):
        """
        Test that the edge centrality correctly identifies the most central edge when there are two such edges.
        This edge should be the one with the lowest weight.
        """

        nodes = ['A', 'B', 'C', 'D', 'W', 'X', 'Y', 'Z']
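        # Two four-node cliques (A-D and W-Z) joined by two bridge edges;
        # ('C', 'X') carries the lowest weight, so it should be the most central edge.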
        edges = {
            ('A', 'B', 0.1),
            ('A', 'C', 0.1),
            ('A', 'D', 0.1),
            ('B', 'C', 0.1),
            ('B', 'D', 0.1),
            ('C', 'D', 0.1),
            ('W', 'X', 0.1),
            ('W', 'Y', 0.1),
            ('W', 'Z', 0.1),
            ('X', 'Y', 0.1),
            ('X', 'Z', 0.1),
            ('Y', 'Z', 0.1),
            ('D', 'W', 0.1),
            ('C', 'X', 0.05),
        }

        graph = nx.Graph()
        graph.add_nodes_from(nodes)
        graph.add_weighted_edges_from(edges)

        extrapolator = WikipediaExtrapolator([], Tokenizer(), TF())
        self.assertEqual(('C', 'X'), extrapolator._most_central_edge(graph))
Code Example #14
    def test_add_to_graph_low_threshold(self):
        """
        Test adding nodes and edges to a graph with a low threshold.
        """

        graph = nx.Graph()
        links = {
            'Olympique Lyonnais': ['Ligue 1', 'AS Monaco'],
        }

        tokenizer = Tokenizer(stem=True, stopwords=stopwords.words('english'))
        extrapolator = WikipediaExtrapolator([], tokenizer, TF())
        extrapolator._add_to_graph(graph, links, threshold=0)
        self.assertEqual(3, len(graph.nodes))
        self.assertEqual(2, len(graph.edges))
        self.assertTrue('Olympique Lyonnais' in graph.nodes)
        self.assertTrue(
            len(graph.nodes['Olympique Lyonnais']['document'].dimensions))
        self.assertTrue('Ligue 1' in graph.nodes)
        self.assertTrue('AS Monaco' in graph.nodes)
        self.assertTrue(('Olympique Lyonnais', 'Ligue 1') in graph.edges)
        self.assertTrue(('Olympique Lyonnais', 'AS Monaco') in graph.edges)
        self.assertFalse(('Ligue 1', 'AS Monaco') in graph.edges)
        self.assertGreater(
            graph.edges[('Olympique Lyonnais', 'Ligue 1')]['weight'], 0)
Code Example #15
def filter_tweets(tweets: Dict[int, Tweet],
                  tokenizer: Tokenizer,
                  max_hashtags: int = 2,
                  max_urls: int = 2) -> Dict[int, Tweet]:
    """
    :param tweets: list of tweets
    :param tokenizer:  nlp.tokenizer.Tokenizer
    :param max_hashtags: max hashtags allowed
    :param max_urls: max urls allowed
    :return: list of filtered pairs (tweet_obj, url_obj)
    """
    logger.info(
        f"Filtering out tweets with more than {max_hashtags} hashtags or {max_urls} urls"
    )

    filtered_tweets = dict()
    for tweet_id, count in tqdm(zip(
            tweets.keys(),
            tokenizer.count_special_tokens(
                [tweet.text for tweet in tweets.values()])),
                                desc="filter_tweets | Tokenizing tweets",
                                total=len(tweets)):
        no_htg, no_url = count

        if no_htg <= max_hashtags and no_url <= max_urls:
            filtered_tweets[tweet_id] = tweets[tweet_id]

    logger.info(f"Ending up with {len(filtered_tweets)} (tweet, url) pairs")
    return filtered_tweets
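
A minimal usage sketch for filter_tweets, assuming only what the function itself relies on: SimpleNamespace objects stand in for real Tweet instances (only their .text attribute is read), and Tokenizer.count_special_tokens is assumed to yield (hashtag_count, url_count) pairs per tweet, as the unpacking in the loop implies.

# Usage sketch: SimpleNamespace stands in for the project's Tweet type.
from types import SimpleNamespace

tweets = {
    1: SimpleNamespace(text="Kick-off in five minutes #LFC #UCL #YNWA"),    # three hashtags
    2: SimpleNamespace(text="Match report at https://example.com/report"),  # one URL
}

tokenizer = Tokenizer(stem=False)
kept = filter_tweets(tweets, tokenizer, max_hashtags=2, max_urls=2)
print(sorted(kept))    # expected: [2], since the first tweet exceeds max_hashtags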
Code Example #16
    def test_repeated_named_entities(self):
        """
        Test that the entity extractor does not filter named entities that appear multiple times.
        """
        """
        Create the test data.
        """
        tokenizer = Tokenizer(stem=False)
        posts = [
            "The downward spiral continues for Lyon. Lyon coach Bruno Genesio under threat.",
        ]
        corpus = [Document(post, tokenizer.tokenize(post)) for post in posts]

        extractor = EntityExtractor()
        candidates = extractor.extract(corpus)
        self.assertEqual(set(["lyon", "bruno genesio"]), set(candidates[0]))
Code Example #17
    def test_named_entity_at_start(self):
        """
        Test that the entity extractor is capable of extracting named entities at the start of a sentence.
        """
        """
        Create the test data.
        """
        tokenizer = Tokenizer(stem=False)
        posts = [
            "Liverpool falter again",
        ]
        corpus = [Document(post, tokenizer.tokenize(post)) for post in posts]

        extractor = EntityExtractor()
        candidates = extractor.extract(corpus)
        self.assertTrue("liverpool" in set(candidates[0]))
Code Example #18
    def test_year_check_long_number(self):
        """
        Test that when checking for a year with a long number, the function does not detect a year.
        """

        article = '1234567890'
        resolver = WikipediaSearchResolver(TF(), Tokenizer(), 0, [])
        self.assertFalse(resolver._has_year(article))
Code Example #19
    def test_year_check(self):
        """
        Test that when checking for a year, the function returns a boolean.
        """

        article = 'Youssouf Koné (footballer, born 1995)'
        extrapolator = WikipediaExtrapolator([], Tokenizer(), TF())
        self.assertTrue(extrapolator._has_year(article))
Code Example #20
    def test_year_check(self):
        """
        Test that when checking for a year, the function returns a boolean.
        """

        article = 'Youssouf Koné (footballer, born 1995)'
        resolver = WikipediaSearchResolver(TF(), Tokenizer(), 0, [])
        self.assertTrue(resolver._has_year(article))
Code Example #21
    def test_get_first_sentence_full_without_period(self):
        """
        Test that when getting the first sentence from a text that has only one sentence, but without punctuation, the whole text is returned.
        """

        text = "Youssouf Koné (born 5 July 1995) is a Malian professional footballer who plays for French side Olympique Lyonnais and the Mali national team as a left-back"
        resolver = WikipediaSearchResolver(TF(), Tokenizer(), 0, [])
        self.assertEqual(text, resolver._get_first_sentence(text))
Code Example #22
    def test_year_check_long_number(self):
        """
        Test that when checking for a year with a long number, the function does not detect a year.
        """

        article = '1234567890'
        extrapolator = WikipediaExtrapolator([], Tokenizer(), TF())
        self.assertFalse(extrapolator._has_year(article))
Code Example #23
    def test_get_first_sentence_empty(self):
        """
        Test that when getting the first sentence from an empty string, an empty string is returned.
        """

        text = ""
        resolver = WikipediaSearchResolver(TF(), Tokenizer(), 0, [])
        self.assertEqual(text, resolver._get_first_sentence(text))
Code Example #24
    def test_remove_brackets(self):
        """
        Test that when removing brackets, they are completely removed.
        """

        article = 'Youssouf Koné (footballer, born 1995)'
        extrapolator = WikipediaExtrapolator([], Tokenizer(), TF())
        self.assertEqual('Youssouf Koné',
                         extrapolator._remove_brackets(article).strip())
Code Example #25
    def test_named_entity_sorting(self):
        """
        Test that the named entities are sorted in descending order of their frequency.
        """
        """
        Create the test data.
        """
        tokenizer = Tokenizer(stem=False)
        posts = [
            "Tottenham in yet another loss, this time against Chelsea",
            "Another loss for Tottenham as Mourinho sees red",
            "Mourinho's Tottenham lose again",
        ]
        corpus = [Document(post, tokenizer.tokenize(post)) for post in posts]
        detector = NERParticipantDetector()
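        # 'tottenham' appears in all three posts, 'mourinho' in two and 'chelsea' in one,
        # so the detector should rank them in that order.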

        participants, unresolved, _ = detector.detect(corpus)
        self.assertEqual(['tottenham', 'mourinho', 'chelsea'], participants)
Code Example #26
    def test_remove_unclosed_brackets(self):
        """
        Test that when removing brackets that are not closed, they are not removed.
        """

        article = 'Youssouf Koné (footballer, born 1995'
        extrapolator = WikipediaExtrapolator([], Tokenizer(), TF())
        self.assertEqual('Youssouf Koné (footballer, born 1995',
                         extrapolator._remove_brackets(article).strip())
Code Example #27
    def test_resolve_empty(self):
        """
        Test that when resolving an empty set of candidates, the resolver returns empty lists.
        """

        resolver = WikipediaNameResolver(TF(), Tokenizer(), 0, [ ])
        resolved, unresolved = resolver.resolve({ })
        self.assertFalse(len(resolved))
        self.assertFalse(len(unresolved))
Code Example #28
    def test_extract_order(self):
        """
        Test that the named entities are returned in the correct order.
        """
        """
        Create the test data.
        """
        tokenizer = Tokenizer(stem=False)
        posts = [
            "Memphis Depay, Leo Dubois, Martin Terrier and Karl Toko Ekambi all out injured",
        ]
        corpus = [Document(post, tokenizer.tokenize(post)) for post in posts]

        extractor = TwitterNEREntityExtractor()
        candidates = extractor.extract(corpus)
        self.assertEqual([
            "memphis depay", 'leo dubois', 'martin terrier', 'karl toko ekambi'
        ], candidates[0])
Code Example #29
    def test_extract(self):
        """
        Test the entity extractor with normal input.
        """
        """
        Create the test data.
        """
        tokenizer = Tokenizer(stem=False)
        posts = [
            "Liverpool falter against Tottenham Hotspur",
            "Mourinho under pressure as Tottenham follow with a loss",
        ]
        corpus = [Document(post, tokenizer.tokenize(post)) for post in posts]

        extractor = TwitterNEREntityExtractor()
        candidates = extractor.extract(corpus)
        self.assertEqual(["liverpool", "tottenham"], candidates[0])
        self.assertEqual(["tottenham"], candidates[1])
Code Example #30
    def test_remove_unclosed_brackets(self):
        """
        Test that when removing brackets that are not closed, they are not removed.
        """

        article = 'Youssouf Koné (footballer, born 1995'
        resolver = WikipediaSearchResolver(TF(), Tokenizer(), 0, [])
        self.assertEqual('Youssouf Koné (footballer, born 1995',
                         resolver._remove_brackets(article).strip())