from flask import request
# `mysql` is assumed to be a flask_mysqldb.MySQL instance configured with a
# DictCursor, so fetched rows can be indexed by column name.
import summarizer


def getSummarizeFeed():
    threadId = request.args['key']
    cur = mysql.connection.cursor()
    # Parameterized query instead of string concatenation, to avoid SQL injection.
    cur.execute("SELECT * FROM mails1 WHERE thread_no = %s", (threadId,))
    mails = cur.fetchall()
    cur.close()
    subject = mails[0]['subject']
    sents = [mail['content'] for mail in mails]
    ob = summarizer.Summarizer(subject, sents)
    return ob.generate_summary()
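# A minimal usage sketch for the endpoint above, assuming it is registered on a
# Flask route such as "/summarizeFeed" served locally on port 5000 (route name
# and port are hypothetical; the registration is not shown in this excerpt).
import requests

resp = requests.get('http://localhost:5000/summarizeFeed', params={'key': '42'})
print(resp.text)  # the summary generated for mail thread 42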
import summarizer

summary = summarizer.Summarizer()
summary.generate_summaries()
import summarizer
import summary_generator as sg
import rouge

# Set the paths
body = "rouge-data/body/"
len_path = "rouge-data/len.txt"  # renamed from `len` to avoid shadowing the builtin
destination = "rouge-data/all_systems_new/"
result = "lda_result-bis.txt"
rouge_script = "rouge-data/ROUGE-1.5.5.pl"
data_rouge = "rouge-data/data/"
summary_dest = "rouge-data/all_systems_n_bis/"
gold = "rouge-data/models/"

# Prepare the summarizer
s = summarizer.Summarizer(model_path="C:/enwiki_20161220_skip_300.bin")

# Prepare the grid over topic and word counts
num_topic = [2, 3, 4, 5, 6, 7, 8, 9]
num_words = [2, 3, 4, 5, 6, 7, 8, 9]

print("Start summarizing...")
for topic in num_topic:
    for word in num_words:
        new_dir = 'topic_' + str(topic) + "_word_" + str(word)
        print(new_dir)
        destination_path = destination + new_dir
        generator = sg.SummaryGenerator(body_dir_path=body,
                                        target_length_path=len_path,
                                        destination_path=destination_path)
import summarizer

model_path = 'C:/Users/Peppo/Desktop/w2vm/enwiki_20161220_skip_300.bin'
summary_destination_root = 'C:/grid-search/'
script_path = 'C:/Users/Peppo/Desktop/w2vm/rouge4MultiLing/rouge/ROUGE-1.5.5.pl'
data_path = 'C:/Users/Peppo/Desktop/w2vm/rouge4MultiLing/rouge/data'
gold_standard_path = 'C:/training/summary/'
results_path = 'grid-search-results.txt'
training_body_path = 'C:/training/body/'
training_length_path = 'C:/training/length.txt'

# Set your parameter ranges
tfidf_values = [0.2, 0.25, 0.3, 0.35]
redundancy_values = [0.8, 0.85, 0.9, 0.95]

# Init my summarizer
print('Loading model...')
s = summarizer.Summarizer(model_path=model_path)
print('done\n')

# Generate summaries for all parameter combinations
for tfidf_value in tfidf_values:
    print('Summarization with tfidf ' + str(tfidf_value) + '...')
    for redundancy_value in redundancy_values:
        # Build directory-safe parameter strings (drop the decimal point, e.g. 0.25 -> "025")
        tfidf_str = "".join(str(tfidf_value).split("."))
        redundancy_str = "".join(str(redundancy_value).split("."))
        # Generate a new name for the destination directory
        new_dir = 'tfidf_' + tfidf_str + "_redundancy_" + redundancy_str
        destination_path = summary_destination_root + new_dir
# coding=utf-8
import json

from django.http import JsonResponse
from rest_framework import generics
from rest_framework import mixins

import summarizer

s = summarizer.Summarizer(
    model_path="C:/Users/Gianni Mastroscianni/Desktop/Magistrale/Accesso Intelligente all'Informazione ed Elaborazione del Linguaggio Naturale/Progetto/word2vec_models/enwiki_20161220_skip_300.bin"
)


class Summary(generics.GenericAPIView, mixins.CreateModelMixin):

    def post(self, request):
        # Load the JSON payload with the summarization parameters
        data = json.loads(request.body)
        summary_list = []  # renamed from `list` to avoid shadowing the builtin
        redundancy_threshold = data['redundancy_threshold']
        tfidf = data['tfidf_threshold']
        summary_length = data['summary_length']
        query_based_token = data['query_based_token']
        s.set_tfidf_threshold(tfidf)
        s.set_redundancy_threshold(redundancy_threshold)
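# A minimal client-side sketch for the Summary view above, assuming it is
# routed at "/summary/" on a local Django dev server (hypothetical URL) and
# that it responds with JSON via JsonResponse. The payload keys mirror the
# ones read in post(); the values are example settings picked from the
# ranges used in the grid-search script.
import requests

payload = {
    'redundancy_threshold': 0.9,
    'tfidf_threshold': 0.3,
    'summary_length': 7,
    'query_based_token': '',
}
resp = requests.post('http://localhost:8000/summary/', json=payload)
print(resp.json())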
import summarizer

SUMMARY_LENGTH = 7
input_file = "input_file.txt"  # alternative: "reason_deep_learning_csv.txt"

s = summarizer.Summarizer()
s.set_factors(10, 10, 10)
summary = s.generate_summary(input_file, SUMMARY_LENGTH)
print(summary)
import newsProvider
import summarizer

if __name__ == '__main__':
    # Renamed from `newsProvider` so the local variable does not shadow the module
    provider = newsProvider.NewsProvider("Apple stock", 5)
    news = provider.getNews()
    for source, text in news.items():
        summarize = summarizer.Summarizer(text, source, 5)
        print(source, summarize.summarize())
            roseType: 'radius',
            animationType: 'scale',
            animationEasing: 'elasticOut'
        }
    ]
});
''')
    return indent(doc.getvalue())


while True:
    print('----------------------------------------')
    with open(input("please choose input file:")) as file:
        reviews = '\n\n'.join(item['review'] for item in json.load(file))
    print('parsing text ...')
    s = summarizer.Summarizer(reviews).summary()
    for i, f in enumerate(s):
        # Print each feature with its positive/negative mention counts
        print(str(i + 1) + '.',
              '[' + f.lemma + ']',
              str(len(f.positive)) + '/' + str(len(f.negative)))
        print('\tpositive:')
        for n in f.positive[:3]:
            print('\t', n.sent)
        print('\tnegative:')
        for n in f.negative[:3]:
            print('\t', n.sent)
    with open('summary.html', 'w') as file:
import summarizer


def generate_summary():
    # Generates the summaries as a side effect; the response body stays empty.
    summary = summarizer.Summarizer()
    summary.generate_summaries()
    return ""