Example #1
def pytldr_textrank(text):
    print("\n", "*" * 30, "PYTLDR TEXTRANK", "*" * 30)
    # pltdrtoken is presumably this project's alias for PyTLDR's Tokenizer.
    tokenizer = pltdrtoken('english')
    summarizer = TextRankSummarizer(tokenizer)
    # summarizer = TextRankSummarizer()  # alternative: default tokenizer
    summary = summarizer.summarize(text, length=4)
    print(summary)
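
These snippets omit their import lines. Below is a minimal sketch of the imports the examples on this page appear to rely on; the module paths are assumptions based on the PyTLDR project layout, so verify them against your install.

# Assumed import paths (not shown in the original snippets).
from pytldr.nlp.tokenizer import Tokenizer as pltdrtoken
from pytldr.summarize.lsa import LsaSummarizer, LsaOzsoy, LsaSteinberger
from pytldr.summarize.relevance import RelevanceSummarizer
from pytldr.summarize.textrank import TextRankSummarizer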
Example #2
def pytldr_textrank(text):
    tokenizer = pltdrtoken('english')
    summarizer = TextRankSummarizer(tokenizer)
    # summarizer = TextRankSummarizer()
    summary = summarizer.summarize(text, length=4)
    return summary

# text = 'The contribution of cloud computing and mobile computing technologies lead to the newly emerging mobile cloud computing paradigm. Three major approaches have been proposed for mobile cloud applications: 1) extending the access to cloud services to mobile devices; 2) enabling mobile devices to work collaboratively as cloud resource providers; 3) augmenting the execution of mobile applications on portable devices using cloud resources. In this paper, we focus on the third approach in supporting mobile data stream applications. More specifically, we study how to optimize the computation partitioning of a data stream application between mobile and cloud to achieve maximum speed/throughput in processing the streaming data. To the best of our knowledge, it is the first work to study the partitioning problem for mobile data stream applications, where the optimization is placed on achieving high throughput of processing the streaming data rather than minimizing the makespan of executions as in other applications. We first propose a framework to provide runtime support for the dynamic computation partitioning and execution of the application. Different from existing works, the framework not only allows the dynamic partitioning for a single user but also supports the sharing of computation instances among multiple users in the cloud to achieve efficient utilization of the underlying cloud resources. Meanwhile, the framework has better scalability because it is designed on the elastic cloud fabrics. Based on the framework, we design a genetic algorithm for optimal computation partition. Both numerical evaluation and real world experiment have been performed, and the results show that the partitioned application can achieve at least two times better performance in terms of throughput than the application without partitioning.'
# print (pytldr_textrank(text))
Example #3
    def __init__(self, source, algorithm, length):
        self.bullets = None
        self.error = None
        self.highlighted_text = None

        text = parse_input(source, extractor='goose')

        if not text:
            text = parse_input(source, extractor='newspaper')

        if algorithm == 'Latent Semantic Analysis':
            summarizer = LsaSummarizer()
        elif algorithm == 'Relevance Score':
            summarizer = RelevanceSummarizer()
        elif algorithm == 'TextRank':
            summarizer = TextRankSummarizer()
        else:
            self.error = 'The summarization algorithm "{0}" does not exist'.format(algorithm)

        if not self.error:
            self.bullets = summarizer.summarize(text, length=length)

            self.highlighted_text = self.get_highlighted_text(text)

            if not self.bullets:
                self.error = "The input text is too short or could not be extracted. If you're submitting a link " \
                             "make sure that it starts with 'http://'. Otherwise, we recommend you copy and paste " \
                             "the text directly below."
Example #4
def invokeSummarizer(text, summarizer_type, count):
    # Map the short type codes onto PyTLDR's summarizer classes.
    if 'lso' == summarizer_type:
        summarizer = LsaOzsoy()
    elif 'lsa' == summarizer_type:
        summarizer = LsaSteinberger()
    elif 'rel' == summarizer_type:
        summarizer = RelevanceSummarizer()
    elif 'tr' == summarizer_type:
        summarizer = TextRankSummarizer()
    else:
        # Fail loudly instead of hitting an UnboundLocalError below.
        raise ValueError('Unknown summarizer type: {0}'.format(summarizer_type))

    summary = summarizer.summarize(text, length=count)
    return summary
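
A small usage sketch of the wrapper above; 'tr' selects the TextRank summarizer, and the input text is a placeholder.

article = "Mobile cloud computing combines portable devices with elastic cloud resources. ..."  # placeholder text
for sentence in invokeSummarizer(article, 'tr', 3):
    print(sentence)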
Example #5
    An opinion poll showed 68 percent of Greeks want a "fair" compromise with euro zone partners while 30 percent said the government should stand tough even if it means reverting to the drachma. The poll found 81 percent want to stay in the euro.

    Deposit outflows in Greece have picked up. JP Morgan bank said that at the current pace Greek banks had only 14 weeks before they run out of collateral to obtain funds from the central bank.

    The ECB has allowed the Greek central bank to provide emergency lending to the banks, but a failure of the debt talks could mean the imposition of capital controls.

    Euro zone member Cyprus was forced to close its banks for two weeks and introduce capital controls during a 2013 crisis. Such controls would need to be imposed when banks are closed. Greek banks are closed next Monday for a holiday.

    (Additional reporting by Yann Le Guernigou, Michael Nienaber, Andrew Callus, Jan Strupczewski, Alastair Macdonald, Adrian Croft, Foo Yun Chee, Robin Emmott, Tom Koerkemeier, Julia Fioretti and Francesca Landini; Writing by Jeremy Gaunt, Paul Taylor and Alastair Macdonald; Editing by Paul Taylor, Giles Elgood and Eric Walsh)
    """

    lsa_o = LsaOzsoy()
    lsa_s = LsaSteinberger()
    relevance = RelevanceSummarizer()
    textrank = TextRankSummarizer()

    print('\n\nLSA Ozsoy:\n')
    summary = lsa_o.summarize(txt, length=5)

    for sentence in summary:
        print(sentence)

    print('\n\nLSA Steinberger:\n')
    summary = lsa_s.summarize(txt, length=5)

    for sentence in summary:
        print(sentence)

    print('\n\nRelevance:\n')
    summary = relevance.summarize(txt, length=5)
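
The snippet is cut off here, before the TextRank summarizer created above is ever called. Judging by the parallel structure of the earlier blocks, it presumably continues along these lines (a hedged reconstruction, not the original file's text):

    for sentence in summary:
        print(sentence)

    print('\n\nTextRank:\n')
    summary = textrank.summarize(txt, length=5)

    for sentence in summary:
        print(sentence)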
Example #6
File: red.py  Project: j777y/a
# Try to load the set when the program starts.
SEEN_POSTS = set()
 
if os.path.exists("seen.pkl"):
    with open("seen.pkl", "rb") as data_file:
        SEEN_POSTS = cPickle.load(data_file)


@atexit.register
def save_seen_posts():
    with open("seen.pkl", "wb") as f:
        cPickle.dump(SEEN_POSTS, f)


def summarize_web_page(url):
    summarizer = TextRankSummarizer()
    return '\n\n'.join(summarizer.summarize(url, length=3))


def main():
    global SEEN_POSTS
 
    reddit = praw.Reddit(user_agent=USER_AGENT)
    reddit.login(REDDIT_USER, REDDIT_PASSWORD)
 
    while True:
        submissions = reddit.get_subreddit("worldnews").get_new(limit=10)
        for submission in submissions:
            if submission.is_self or submission.id in SEEN_POSTS:
                continue
 
Example #7
def TextRank(txt, l):
    textrank = TextRankSummarizer()
    return textrank.summarize(txt, length=l)
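
The summarizers also accept a URL as input (example #6 passes one straight to summarize()), so this wrapper can be pointed at a web page and its output joined into a single blurb; the URL below is a placeholder.

print('\n\n'.join(TextRank('http://example.com/some-article', 4)))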