Example #1
    def test_cue_2(self):
        document = build_document(("ba bb bc bb unknown ľščťžýáíé sb sc sb", ),
                                  ("Pepek likes spinach", ))

        summarizer = EdmundsonSummarizer()
        summarizer.bonus_words = (
            "ba",
            "bb",
            "bc",
        )
        summarizer.stigma_words = (
            "sa",
            "sb",
            "sc",
        )

        sentences = summarizer.cue_method(document, 10)
        self.assertEqual(len(sentences), 2)
        self.assertEqual(to_unicode(sentences[0]),
                         "ba bb bc bb unknown ľščťžýáíé sb sc sb")
        self.assertEqual(to_unicode(sentences[1]), "Pepek likes spinach")

        sentences = summarizer.cue_method(document, 1)
        self.assertEqual(len(sentences), 1)
        self.assertEqual(to_unicode(sentences[0]),
                         "ba bb bc bb unknown ľščťžýáíé sb sc sb")
Example #2
    def summarize_with_info(self, corpus, length, algorithm):
        parser = PlaintextParser.from_string(corpus, Tokenizer(self.LANGUAGE))

        if algorithm == "textrank":
            summarizer = TextRankSummarizer(Stemmer(self.LANGUAGE))
        elif algorithm == "lexrank":
            summarizer = LexRankSummarizer(Stemmer(self.LANGUAGE))
        elif algorithm == "luhn":
            summarizer = LuhnSummarizer(Stemmer(self.LANGUAGE))
        elif algorithm == "edmundson":
            summarizer = EdmundsonSummarizer(Stemmer(self.LANGUAGE))
            summarizer.bonus_words = parser.significant_words
            summarizer.stigma_words = parser.stigma_words
        elif algorithm == "kl":
            summarizer = KLSummarizer(Stemmer(self.LANGUAGE))
        elif algorithm == "lsa":
            summarizer = LsaSummarizer(Stemmer(self.LANGUAGE))
        elif algorithm == "sumbasic":
            summarizer = SumBasicSummarizer(Stemmer(self.LANGUAGE))
        elif algorithm == "random":
            summarizer = RandomSummarizer(Stemmer(self.LANGUAGE))
        else:
            raise NotImplementedError("Summary algorithm is not available")

        summarizer.stop_words = get_stop_words(self.LANGUAGE)

        return summarizer(parser.document, length)
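The same dispatch can also be written as a table lookup; a sketch under the same imports, with the Edmundson-specific word lists still handled separately (an illustrative rewrite, not part of the original code):

SUMMARIZER_CLASSES = {
    "textrank": TextRankSummarizer,
    "lexrank": LexRankSummarizer,
    "luhn": LuhnSummarizer,
    "edmundson": EdmundsonSummarizer,
    "kl": KLSummarizer,
    "lsa": LsaSummarizer,
    "sumbasic": SumBasicSummarizer,
    "random": RandomSummarizer,
}

def build_summarizer(algorithm, language, parser):
    # Look up the summarizer class and configure it the same way as above.
    try:
        summarizer = SUMMARIZER_CLASSES[algorithm](Stemmer(language))
    except KeyError:
        raise NotImplementedError("Summary algorithm is not available")
    if algorithm == "edmundson":
        summarizer.bonus_words = parser.significant_words
        summarizer.stigma_words = parser.stigma_words
    summarizer.stop_words = get_stop_words(language)
    return summarizer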
Example #3
def test_cue_3():
    document = build_document((
        "ba " * 10,
        "bb " * 10,
        " sa" * 8 + " bb" * 10,
        "bb bc ba",
    ), (), (
        "babbbc " * 10,
        "na nb nc nd sa" + " bc" * 10,
        " ba n" * 10,
    ))
    summarizer = EdmundsonSummarizer()
    summarizer.bonus_words = (
        "ba",
        "bb",
        "bc",
    )
    summarizer.stigma_words = (
        "sa",
        "sb",
        "sc",
    )

    sentences = summarizer.cue_method(document, 5)

    assert list(map(to_unicode, sentences)) == [
        ("ba " * 10).strip(),
        ("bb " * 10).strip(),
        "bb bc ba",
        "na nb nc nd sa bc bc bc bc bc bc bc bc bc bc",
        ("ba n " * 10).strip(),
    ]
Example #4
    def test_cue_3(self):
        document = build_document((
            "ba " * 10,
            "bb " * 10,
            " sa" * 8 + " bb" * 10,
            "bb bc ba",
        ), (), (
            "babbbc " * 10,
            "na nb nc nd sa" + " bc" * 10,
            " ba n" * 10,
        ))

        summarizer = EdmundsonSummarizer()
        summarizer.bonus_words = (
            "ba",
            "bb",
            "bc",
        )
        summarizer.stigma_words = (
            "sa",
            "sb",
            "sc",
        )

        sentences = summarizer.cue_method(document, 5)
        self.assertEqual(len(sentences), 5)
        self.assertEqual(to_unicode(sentences[0]), ("ba " * 10).strip())
        self.assertEqual(to_unicode(sentences[1]), ("bb " * 10).strip())
        self.assertEqual(to_unicode(sentences[2]), "bb bc ba")
        self.assertEqual(to_unicode(sentences[3]),
                         "na nb nc nd sa bc bc bc bc bc bc bc bc bc bc")
        self.assertEqual(to_unicode(sentences[4]), ("ba n " * 10).strip())
Example #5
    def summarize(self, corpus, length, algorithm):
        parser = PlaintextParser.from_string(corpus, Tokenizer(self.LANGUAGE))

        if algorithm == "textrank":
            summarizer = TextRankSummarizer(Stemmer(self.LANGUAGE))
        elif algorithm == "lexrank":
            summarizer = LexRankSummarizer(Stemmer(self.LANGUAGE))
        elif algorithm == "luhn":
            summarizer = LuhnSummarizer(Stemmer(self.LANGUAGE))
        elif algorithm == "edmundson":
            summarizer = EdmundsonSummarizer(Stemmer(self.LANGUAGE))
        elif algorithm == "kl":
            summarizer = KLSummarizer(Stemmer(self.LANGUAGE))
        elif algorithm == "lsa":
            summarizer = LsaSummarizer(Stemmer(self.LANGUAGE))
        elif algorithm == "sumbasic":
            summarizer = SumBasicSummarizer(Stemmer(self.LANGUAGE))
        elif algorithm == "random":
            summarizer = RandomSummarizer(Stemmer(self.LANGUAGE))
        else:
            raise NotImplementedError("Summary algorithm is not available")

        summarizer.stop_words = get_stop_words(self.LANGUAGE)
        summary = " ".join(
            [obj._text for obj in summarizer(parser.document, length)])

        return summary
Example #6
    def test_title_method_3(self):
        document = build_document_from_string("""
            # This is cool heading
            Because I am sentence I like words
            And because I am string I like characters

            # blank and heading
            This is next paragraph because of blank line above
            Here is the winner because contains words like cool and heading
        """)

        summarizer = EdmundsonSummarizer()
        summarizer.null_words = (
            "this",
            "is",
            "I",
            "am",
            "and",
        )

        sentences = summarizer.title_method(document, 3)
        self.assertEqual(len(sentences), 3)
        self.assertEqual(to_unicode(sentences[0]),
                         "Because I am sentence I like words")
        self.assertEqual(to_unicode(sentences[1]),
                         "This is next paragraph because of blank line above")
        self.assertEqual(
            to_unicode(sentences[2]),
            "Here is the winner because contains words like cool and heading")
Example #7
    def test_key_3(self):
        document = build_document((
            "wa",
            "wa wa",
            "wa wa wa",
            "wa wa wa wa",
            "wa Wa Wa Wa wa",
        ), ("x X x X", ))
        summarizer = EdmundsonSummarizer()
        summarizer.bonus_words = (
            "wa",
            "X",
        )

        sentences = summarizer.key_method(document, 3)
        self.assertEqual(len(sentences), 3)
        self.assertEqual(to_unicode(sentences[0]), "wa wa wa")
        self.assertEqual(to_unicode(sentences[1]), "wa wa wa wa")
        self.assertEqual(to_unicode(sentences[2]), "wa Wa Wa Wa wa")

        sentences = summarizer.key_method(document, 3, weight=0)
        self.assertEqual(len(sentences), 3)
        self.assertEqual(to_unicode(sentences[0]), "wa wa wa wa")
        self.assertEqual(to_unicode(sentences[1]), "wa Wa Wa Wa wa")
        self.assertEqual(to_unicode(sentences[2]), "x X x X")
Example #8
def test_cue_2():
    document = build_document(("ba bb bc bb unknown ľščťžýáíé sb sc sb", ),
                              ("Pepek likes spinach", ))

    summarizer = EdmundsonSummarizer()
    summarizer.bonus_words = (
        "ba",
        "bb",
        "bc",
    )
    summarizer.stigma_words = (
        "sa",
        "sb",
        "sc",
    )

    sentences = summarizer.cue_method(document, 10)

    assert list(map(to_unicode, sentences)) == [
        "ba bb bc bb unknown ľščťžýáíé sb sc sb",
        "Pepek likes spinach",
    ]

    sentences = summarizer.cue_method(document, 1)

    assert list(map(to_unicode, sentences)) == [
        "ba bb bc bb unknown ľščťžýáíé sb sc sb",
    ]
Example #9
def test_key_3():
    document = build_document((
        "wa",
        "wa wa",
        "wa wa wa",
        "wa wa wa wa",
        "wa Wa Wa Wa wa",
    ), ("x X x X", ))
    summarizer = EdmundsonSummarizer()
    summarizer.bonus_words = (
        "wa",
        "X",
    )

    sentences = summarizer.key_method(document, 3)
    assert list(map(to_unicode, sentences)) == [
        "wa wa wa",
        "wa wa wa wa",
        "wa Wa Wa Wa wa",
    ]

    sentences = summarizer.key_method(document, 3, weight=0)
    assert list(map(to_unicode, sentences)) == [
        "wa wa wa wa",
        "wa Wa Wa Wa wa",
        "x X x X",
    ]
Example #10
    def test_mixed_cue_key(self):
        document = build_document_from_string("""
            # This is cool heading
            Because I am sentence I like words
            And because I am string I like characters

            # blank and heading
            This is next paragraph because of blank line above
            Here is the winner because contains words like cool and heading
        """)

        summarizer = EdmundsonSummarizer(cue_weight=1,
                                         key_weight=1,
                                         title_weight=0,
                                         location_weight=0)
        summarizer.bonus_words = ("cool", "heading", "sentence", "words",
                                  "like", "because")
        summarizer.stigma_words = (
            "this",
            "is",
            "I",
            "am",
            "and",
        )

        sentences = summarizer(document, 2)
        self.assertEqual(len(sentences), 2)
        self.assertEqual(to_unicode(sentences[0]),
                         "Because I am sentence I like words")
        self.assertEqual(
            to_unicode(sentences[1]),
            "Here is the winner because contains words like cool and heading")
Example #11
def test_location_method_2():
    document = build_document_from_string("""
        # na nb nc ha hb
        ha = 1 + 1 + 0 = 2
        middle = 0
        ha hb = 2 + 1 + 0 = 3

        first = 1
        ha hb ha = 3
        last = 1

        # hc hd
        hb hc hd = 3 + 1 + 0 = 4
        ha hb = 2 + 1 + 0 = 3
    """)

    summarizer = EdmundsonSummarizer()
    summarizer.null_words = (
        "na",
        "nb",
        "nc",
        "nd",
        "ne",
    )

    sentences = summarizer.location_method(document, 4, w_p1=0, w_p2=0)

    assert list(map(to_unicode, sentences)) == [
        "ha hb = 2 + 1 + 0 = 3",
        "ha hb ha = 3",
        "hb hc hd = 3 + 1 + 0 = 4",
        "ha hb = 2 + 1 + 0 = 3",
    ]
Example #12
def EdmundsonSummary(document, sentences):
    parser = PlaintextParser.from_string(document, Tokenizer("english"))
    summarizer = EdmundsonSummarizer()
    summary = summarizer(parser.document, sentences)
    # for sentence in summary:
    #     print(sentence)
    return summary
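As the ValueError tests later in this listing suggest, the Edmundson methods generally expect their word sets to be non-empty, so a variant of this helper usually has to supply them. A sketch with placeholder word lists (the lists here are made up for illustration):

def EdmundsonSummaryWithWords(document, sentences,
                              bonus_words=("important", "significant"),
                              stigma_words=("however", "hardly"),
                              null_words=("the", "a", "and", "is")):
    # Same as EdmundsonSummary above, but configures the word sets the
    # Edmundson methods rely on before summarizing.
    parser = PlaintextParser.from_string(document, Tokenizer("english"))
    summarizer = EdmundsonSummarizer()
    summarizer.bonus_words = bonus_words
    summarizer.stigma_words = stigma_words
    summarizer.null_words = null_words
    return summarizer(parser.document, sentences)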
Example #13
def test_title_method_2():
    document = build_document_from_string("""
        # This is cool heading
        Because I am sentence I like words
        And because I am string I like characters

        # blank and heading
        This is next paragraph because of blank line above
        Here is the winner because contains words like cool and heading
    """)

    summarizer = EdmundsonSummarizer()
    summarizer.null_words = (
        "this",
        "is",
        "I",
        "am",
        "and",
    )

    sentences = summarizer.title_method(document, 2)

    assert list(map(to_unicode, sentences)) == [
        "This is next paragraph because of blank line above",
        "Here is the winner because contains words like cool and heading",
    ]
Example #14
    def test_location_method_2(self):
        document = build_document_from_string("""
            # na nb nc ha hb
            ha = 1 + 1 + 0 = 2
            middle = 0
            ha hb = 2 + 1 + 0 = 3

            first = 1
            ha hb ha = 3
            last = 1

            # hc hd
            hb hc hd = 3 + 1 + 0 = 4
            ha hb = 2 + 1 + 0 = 3
        """)

        summarizer = EdmundsonSummarizer()
        summarizer.null_words = (
            "na",
            "nb",
            "nc",
            "nd",
            "ne",
        )

        sentences = summarizer.location_method(document, 4, w_p1=0, w_p2=0)
        self.assertEqual(len(sentences), 4)
        self.assertEqual(to_unicode(sentences[0]), "ha hb = 2 + 1 + 0 = 3")
        self.assertEqual(to_unicode(sentences[1]), "ha hb ha = 3")
        self.assertEqual(to_unicode(sentences[2]), "hb hc hd = 3 + 1 + 0 = 4")
        self.assertEqual(to_unicode(sentences[3]), "ha hb = 2 + 1 + 0 = 3")
Example #15
    def test_empty_document(self):
        summarizer = EdmundsonSummarizer(cue_weight=0,
                                         key_weight=0,
                                         title_weight=0,
                                         location_weight=0)

        sentences = summarizer(build_document(), 10)
        self.assertEqual(len(sentences), 0)
Example #16
def EdmundsonSummary(document, sentences):
    parser = PlaintextParser.from_string(document, Tokenizer("english"))
    summarizer = EdmundsonSummarizer()
    summary = summarizer(parser.document, sentences)
    results = []
    for sentence in summary:
        results.append(str(sentence))
    return results
Example #17
    def test_title_method_with_empty_document(self):
        summarizer = EdmundsonSummarizer()
        summarizer.null_words = (
            "ba",
            "bb",
            "bc",
        )

        sentences = summarizer.title_method(build_document(), 10)
        self.assertEqual(len(sentences), 0)
Example #18
def test_cue_with_no_stigma_words():
    summarizer = EdmundsonSummarizer()
    summarizer.bonus_words = (
        "great",
        "very",
        "beautiful",
    )

    with pytest.raises(ValueError):
        summarizer.cue_method(build_document(), 10)
Example #19
    def test_cue_with_no_stigma_words(self):
        summarizer = EdmundsonSummarizer()
        summarizer.bonus_words = (
            "great",
            "very",
            "beautiful",
        )

        self.assertRaises(ValueError, summarizer.cue_method, build_document(),
                          10)
Example #20
def test_title_method_with_empty_document():
    summarizer = EdmundsonSummarizer()
    summarizer.null_words = (
        "ba",
        "bb",
        "bc",
    )

    sentences = summarizer.title_method(build_document(), 10)
    assert list(map(to_unicode, sentences)) == []
Example #21
    def test_location_method_with_empty_document(self):
        summarizer = EdmundsonSummarizer()
        summarizer.null_words = (
            "na",
            "nb",
            "nc",
        )

        sentences = summarizer.location_method(build_document(), 10)
        self.assertEqual(len(sentences), 0)
Example #22
    def test_cue_with_no_bonus_words(self):
        summarizer = EdmundsonSummarizer()
        summarizer.stigma_words = (
            "useless",
            "bad",
            "spinach",
        )

        self.assertRaises(ValueError, summarizer.cue_method, build_document(),
                          10)
Example #23
    def test_key_empty(self):
        summarizer = EdmundsonSummarizer()
        summarizer.bonus_words = (
            "ba",
            "bb",
            "bc",
        )

        sentences = summarizer.key_method(build_document(), 10)
        self.assertEqual(len(sentences), 0)
Example #24
def test_cue_with_no_bonus_words():
    summarizer = EdmundsonSummarizer()
    summarizer.stigma_words = (
        "useless",
        "bad",
        "spinach",
    )

    with pytest.raises(ValueError):
        summarizer.cue_method(build_document(), 10)
Example #25
def test_key_empty():
    summarizer = EdmundsonSummarizer()
    summarizer.bonus_words = (
        "ba",
        "bb",
        "bc",
    )

    sentences = summarizer.key_method(build_document(), 10)

    assert list(map(to_unicode, sentences)) == []
Example #26
    def __summarize(self, parser):
        summarizer = EdmundsonSummarizer(Stemmer(self.__language))
        # words of high importance
        summarizer.bonus_words = ('info', 'information', 'due', 'overdue',
                                  'withdraw', 'balance', 'fee', 'letter',
                                  'compliance', 'super')
        # words of low or even negative importance; dummy placeholders keep
        # the required word sets non-empty
        summarizer.stigma_words = ('zdfgthdvndadv',)
        summarizer.null_words = ('zdfgthdvndadv',)
        final_sentences = summarizer(parser.document, self.__sentences_count)
        return self.__join_sentences(final_sentences)
Example #27
def test_location_method_with_empty_document():
    summarizer = EdmundsonSummarizer()
    summarizer.null_words = (
        "na",
        "nb",
        "nc",
    )

    sentences = summarizer.location_method(build_document(), 10)

    assert list(map(to_unicode, sentences)) == []
Example #28
def summarize(text, sentence_count, bonus_words, language='english'):
    '''Summarize ``text`` down to ``sentence_count`` sentences with the
    Edmundson method, boosting the supplied ``bonus_words``.'''
    summarizer = EdmundsonSummarizer(Stemmer(language))
    summarizer.stop_words = get_stop_words(language)
    summarizer.bonus_words = bonus_words
    summarizer.stigma_words = ['zdfgthdvndadv']  # dummy placeholder; must be non-empty
    summarizer.null_words = stopwords.words(language)  # NLTK stopwords corpus
    summary = summarizer(
        PlaintextParser(text, Tokenizer(language)).document, sentence_count)
    return summary
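A minimal way such a helper might be called, assuming the sumy imports used throughout these examples plus NLTK's stopwords corpus are available (the sample text and bonus words are made up):

text = ("The balance is overdue and a late fee applies. "
        "Please withdraw the remaining amount. "
        "The weather was pleasant yesterday.")
for sentence in summarize(text, sentence_count=1, bonus_words=["overdue", "fee"]):
    print(sentence)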
Example #29
def edmundson_summarizer(text, stemmer, language, sentences_count):
    parser = PlaintextParser.from_string(text, Tokenizer(language))
    summarizer = EdmundsonSummarizer(stemmer)
    summarizer.stop_words = get_stop_words(language)
    summarizer.bonus_words = ("computing", "learning", "mobile")
    summarizer.stigma_words = ("another", "and", "some", "next")
    summarizer.null_words = ("another", "and", "some", "next")

    sentences = []
    for sentence in summarizer(parser.document, sentences_count):
        sentences.append(str(sentence))
    return "\n".join(sentences)
Example #30
def summarize(srt_file, summarizer, n_sentences, language, bonusWords,
              stigmaWords):
    # Convert the srt file to a plain-text document and pass it to the sumy
    # (text summarization) library functions.
    parser = PlaintextParser.from_string(srt_to_doc(srt_file),
                                         Tokenizer(language))

    if summarizer == 'ED':
        summarizer = EdmundsonSummarizer()

        with open(bonusWords, "r+") as f:
            bonus_wordsList = f.readlines()
            bonus_wordsList = [x.strip() for x in bonus_wordsList]
            f.close()
        with open(stigmaWords, "r+") as f:
            stigma_wordsList = f.readlines()
            stigma_wordsList = [x.strip() for x in stigma_wordsList]
            f.close()

        summarizer.bonus_words = bonus_wordsList
        summarizer.stigma_words = stigma_wordsList
        summarizer.null_words = get_stop_words(language)
    else:
        stemmer = Stemmer(language)
        summarizer = SUMMARIZERS[summarizer](stemmer)
        summarizer.stop_words = get_stop_words(language)

    ret = []
    summarizedSubtitles = []
    # The document is now summarized; each selected sentence is mapped back to
    # its source subtitle via the index embedded in the sentence text.

    for sentence in summarizer(parser.document, n_sentences):
        # Index of the sentence; srt_to_doc is assumed to prefix each sentence
        # with its subtitle index in parentheses, which this regex recovers.
        index = int(re.findall(r"\(([0-9]+)\)", str(sentence))[0])
        # Using the index, select the corresponding subtitle
        item = srt_file[index]
        summarizedSubtitles.append(item)

        # add the selected subtitle to the result array
        ret.append(srt_item_to_range(item))

    return ret, summarizedSubtitles