def get(self):
    # Resolve the logged-in user.
    uname = self.get_current_user()
    user = User.get_user_by_name(uname)
    # Default to the card for the current period unless an explicit
    # card id is passed as ?cid=...
    order = get_order()
    card = Card.get_by_porder(order)
    cid = self.get_argument("cid", None)
    if cid is not None:
        card = Card.get_by_cid(cid)
    article = Article.get_all_Acard(200)
    # Keep only cards that have already been published (0 < porder <= order).
    usedCard = [one for one in Card.get_all_card() if 0 < one.porder <= order]
    # Sidebar data: top six ranked articles, at most nine users, three cards.
    reArticles = Article.get_all(200)
    Rarticle = sorted(reArticles, cmp=BaseHandler.rank)[:6]
    Ruser = User.get_all_user(100)[:9]
    usedCard = usedCard[:3]
    self.render("card.html", user=user, Rarticle=Rarticle, Article=article,
                usedCard=usedCard, Ruser=Ruser, card=card)
def get(self):
    # Resolve the logged-in user.
    uname = self.get_current_user()
    user = User.get_user_by_name(uname)
    # Default to the story for the current period unless an explicit
    # story id is passed as ?sid=...
    order = get_order()
    story = Story.get_by_porder(order)
    sid = self.get_argument("sid", None)
    if sid is not None:
        story = Story.get_by_sid(sid)
    article = Article.get_all_Astory(200)
    # Keep only stories that have already been published (0 < porder <= order).
    usedStory = [one for one in Story.get_all_story() if 0 < one.porder <= order]
    # Sidebar data: top six ranked articles, at most nine users, three stories.
    reArticles = Article.get_all(200)
    Rarticle = sorted(reArticles, cmp=BaseHandler.rank)[:6]
    Ruser = User.get_all_user(100)[:9]
    usedStory = usedStory[:3]
    self.render("story.html", user=user, Rarticle=Rarticle, Article=article,
                usedStory=usedStory, Ruser=Ruser, story=story)
def get(self):
    # Render the admin control panel only when the secure root cookie
    # matches; otherwise redirect to the root login page.
    root = self.get_secure_cookie('root')
    if root == 'suiyue':
        current_order = get_order()
        cards = Card.get_all_card()
        stories = Story.get_all_story()
        self.render("control.html", order=current_order,
                    cards=cards, stories=stories)
    else:
        self.redirect('/rootlogin')
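# A minimal sketch of how the three handlers above might be wired into a
# Tornado application. The handler class names, URL patterns, port, and
# cookie secret are assumptions for illustration, not taken from the source.
import tornado.ioloop
import tornado.web

application = tornado.web.Application(
    [
        (r"/card", CardHandler),        # hypothetical class holding the card get()
        (r"/story", StoryHandler),      # hypothetical class holding the story get()
        (r"/control", ControlHandler),  # hypothetical class holding the control get()
    ],
    cookie_secret="CHANGE_ME",  # required for get_secure_cookie()
)

if __name__ == "__main__":
    application.listen(8888)
    tornado.ioloop.IOLoop.instance().start()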
from aiohttp import web
from dateutil.parser import parse  # assumed: complete_time arrives as a date string


async def complete_orders(request):
    # Validate the JSON body: it must contain only courier_id (positive int),
    # order_id (positive int), and complete_time (string); any other key or
    # type is rejected with 400.
    json = await request.json()
    courier_id = 0
    order_id = 0
    complete_time = 0
    for k, v in json.items():
        if k == "courier_id" and type(v) is int and v > 0:
            courier_id = v
        elif k == "order_id" and type(v) is int and v > 0:
            order_id = v
        elif k == "complete_time" and type(v) is str:
            complete_time = parse(v)
        else:
            return web.Response(status=400)
    if courier_id == 0 or order_id == 0 or complete_time == 0:
        return web.Response(status=400)
    # The order must exist and belong to the courier reporting completion.
    order = get_order(order_id, request.app['db'])
    if order is None or order.courier_id != courier_id:
        return web.Response(status=400)
    # Record the completion time only once, so repeated calls are idempotent.
    if order.complete == 0:
        order.set_complete(request.app['db'], complete_time)
    return web.json_response(status=200, data={"order_id": order.id})
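# A minimal sketch of wiring the handler above into an aiohttp application.
# The route path, port, and connect_db() helper are assumptions; a valid body
# would look like {"courier_id": 7, "order_id": 42, "complete_time": "2021-01-10T10:33:01Z"}.
from aiohttp import web

app = web.Application()
app['db'] = connect_db()  # hypothetical helper returning the DB handle used above
app.router.add_post('/orders/complete', complete_orders)

if __name__ == '__main__':
    web.run_app(app, port=8080)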
# Now that we have the entire dictionary of url:text mappings, we can analyze:
# the Wikipedia article provides the reference keyword order and density.
wiki_text = process.get_text('http://en.wikipedia.org/wiki/' + term)
ordered_keywords = order.get_ordered_keywords(wiki_text, keywords)
wiki_distribution = process.get_density(wiki_text, keywords)

# Calculates the summarability and orderability scores for each page.
summarability = {}
orderability = {}
for url, text in url_text_dictionary.iteritems():
    current_distribution = process.get_density(text, keywords)
    summarability[url] = order.get_difference(wiki_distribution, current_distribution)
    orderability[url] = order.get_order(current_distribution, ordered_keywords)
print summarability
print "\n\n"
print orderability

# Gets the urls sorted by summarability as a list of (url, score) tuples,
# then dumps each page's text to a file in that order.
sorted_summarability = sorted(summarability.iteritems(), key=operator.itemgetter(1))
f = open('dump/sorted_summarability_' + term + ".txt", 'w')
for (url, score) in sorted_summarability:
    try:
        f.write(" URL: " + url + "\n\n")
        text = url_text_dictionary[url]
        f.write(text)
        f.write("\n\n")
    except Exception:
        # Assumed handler: skip pages whose text cannot be written
        # (e.g. encoding errors).
        continue
f.close()
def index(request):
    # Use the ?query= parameter when present (spaces become underscores,
    # matching Wikipedia article names); otherwise fall back to a default term.
    if 'query' in request.GET:
        term = request.GET['query']
        term = term.replace(" ", "_")
        print error("****THE TERM IS " + term)
    else:
        term = "Manifold"

    # Gets the list of all words with associated tf-idf scores.
    words = tfidf.get_tf_idf(term, "wikipedia")
    # Cleans the list by removing elements with non-alphanumeric characters
    # and duplicates, then keeps the top ten words as keywords.
    cleaned = tfidf.parse(words)
    cleaned = tfidf.remove_duplicates(cleaned)
    keywords = tfidf.get_top_words(10, cleaned)
    print error(str(keywords))

    # Gets all combinations of the keywords; this is a list of tuples.
    combinations = process.get_combinations(keywords)

    # Loads cached search results if they exist; otherwise searches Google
    # for each keyword pair and caches the resulting URLs as a pickle.
    try:
        urls = pickle.load(open("generator/dump/urls_" + term + ".p", "rb"))
    except:
        print "Searching for each keyword set instead, and saving as pickle"
        urls = []
        for (word1, word2) in combinations:
            print "Searching for " + word1 + ", " + word2 + "."
            try:
                results = google.search(term + " " + word1 + " " + word2,
                                        "com", "en", 1, 0, 3, 2.0)
                for i in range(3):
                    next_result = results.next()
                    if next_result not in urls:
                        urls.append(next_result)
            except:
                print "HTML request overload."
                break
        pickle.dump(urls, open("generator/dump/urls_" + term + ".p", "wb"))

    # Gets the cleaned text for each url, again cached as a pickle.
    try:
        url_text_dictionary = pickle.load(
            open("generator/dump/url_text_dictionary_" + term + ".p", "rb"))
    except:
        url_text_dictionary = process.get_text_dictionary(urls)
        pickle.dump(
            url_text_dictionary,
            open("generator/dump/url_text_dictionary_" + term + ".p", "wb"))
        # Keep the cached url list in sync with the pages actually fetched.
        urls = url_text_dictionary.keys()
        pickle.dump(urls, open("generator/dump/urls_" + term + ".p", "wb"))

    # Gets the title for each url, cached as well.
    try:
        print error("Loading titles.")
        url_titles = pickle.load(
            open("generator/dump/url_titles_" + term + ".p", "rb"))
        print error("Loaded existing titles.")
    except:
        print error("Getting titles.")
        url_titles = process.get_titles(urls)
        pickle.dump(url_titles,
                    open("generator/dump/url_titles_" + term + ".p", "wb"))

    # Now that we have the entire dictionary of url:text mappings, we can
    # analyze: the Wikipedia article provides the reference keyword order
    # and density.
    wiki_text = process.get_text('http://en.wikipedia.org/wiki/' + term)
    ordered_keywords = order.get_ordered_keywords(wiki_text, keywords)
    wiki_distribution = process.get_density(wiki_text, keywords)

    # Calculates the summarability and orderability scores for each page.
    summarability = {}
    orderability = {}
    for url, text in url_text_dictionary.iteritems():
        current_distribution = process.get_density(text, keywords)
        summarability[url] = order.get_difference(wiki_distribution,
                                                  current_distribution)
        orderability[url] = order.get_order(current_distribution,
                                            ordered_keywords)

    # Gets the urls sorted by each score as lists of (url, score) tuples.
    sorted_summarability = sorted(summarability.iteritems(),
                                  key=operator.itemgetter(1))
    sorted_orderability = sorted(orderability.iteritems(),
                                 key=operator.itemgetter(1))
    combined = order.combine_summarability_and_orderability(
        sorted_summarability, orderability, url_titles)
    print "Titles: " + str(url_titles)

    # Adds the title to each (url, score) tuple.
    with_titles = []
    for (url, score) in combined:
        with_titles.append((url, score, url_titles[url]))

    return render(request, 'generator/index.html', {
        'summarability': json.dumps(sorted_summarability),
        'orderability': json.dumps(sorted_orderability),
        'combined': json.dumps(combined),
        'with_titles': json.dumps(with_titles),
    })
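# A minimal sketch of exposing the index view above through a Django 1.x
# URLconf (matching the Python 2 idioms used here). The module path and
# route are assumptions for illustration.
from django.conf.urls import url
from generator import views

urlpatterns = [
    url(r'^$', views.index, name='index'),  # e.g. GET /?query=Manifold
]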
def main():
    global stage
    global tiket
    # Restore the last issued ticket number.
    with open('last_tiket.txt', 'r') as f:
        tiket = int(f.read())
    while True:
        answer = get_message()
        if answer is not None:
            chat_id = answer['chat_id']
            text = answer['text']
            message_id = answer['message_id']
            time_stop = 0
            try:
                if text in command or stage != 0:
                    if text == '/start':
                        send_messege(
                            chat_id,
                            'Good afternoon!\nYou have reached the automated request intake and processing system of the Digital Distribution department of 1С:Северо-Запад.')
                        send_messege(
                            chat_id,
                            'To create a request, use the /order command and follow the instructions, or use /help to see all available commands!')
                        send_messege(
                            chat_id,
                            '\n\nWARNING!\n\nBy continuing to use this bot, you consent to the processing of your personal data.\nThe agreement is available here - link')
                        stage = 0
                        # Remember this chat id so each user is registered once.
                        with open('chat_ID.txt', 'r+') as f:
                            known = f.read()
                            if str(chat_id) not in known:
                                f.write('\n' + str(chat_id))
                        continue
                    if text == '/help':
                        send_messege(
                            chat_id,
                            'Commands:\n/help - list all available commands\n/contact - contact details\n/start - start working with the bot\n/order - create a request\n/check - check an existing request')
                        stage = 0
                        continue
                    if text == '/contact':
                        send_messege(
                            chat_id,
                            'Contact details.\nWebsite: http://1cpd.businesscatalyst.com/\ne-mail: [email protected]\nphone: 8 (812) 385-15-99')
                        stage = 0
                        continue
                    if text == '/order':
                        # Issue a new ticket number and walk the user through
                        # the order form one stage at a time (stage 8 ends it).
                        stage = 1
                        tiket = tiket + 1
                        while stage != 8:
                            messege = order.get_order(chat_id, text, stage, tiket)
                            send_messege(chat_id, messege)
                            stage = stage + 1
                        continue
                    if text == '/check':
                        send_messege(chat_id, 'Please enter your request number.')
                        stage = 16
                        continue
                    if stage == 16:
                        # The request files are written elsewhere with Russian
                        # names, so the filename pattern and the 'Статус:'
                        # marker below are kept as-is to match them.
                        try:
                            if '№' in text:
                                file = open(' От ' + str(chat_id) + ' заявка ' + str(text) + '.txt', 'r')
                            else:
                                file = open(' От ' + str(chat_id) + ' заявка №' + str(text) + '.txt', 'r')
                        except Exception:
                            send_messege(
                                chat_id,
                                'Invalid input, or the "#" character was used.\nPlease enter your request number.')
                        else:
                            order_check = file.read()
                            file.close()
                            stage = 0
                            # Send everything from the status marker onwards.
                            r = order_check.find('Статус:')
                            send_messege(chat_id, order_check[r:])
                        continue
                    if message_id != time_stop:
                        stage = get_order(chat_id, text, stage, tiket)
                        time_stop = message_id
                        continue
                else:
                    send_messege(
                        chat_id,
                        '\n\nWARNING!\n\nPlease use the built-in commands:\n/start - start working with the bot\n/order - create a request\n/check - check an existing request')
            except Exception as e:
                send_messege(
                    chat_id,
                    '\n\nWARNING!\n\nInternal error, please restart the dialogue with the /start command')
                print(e)
        else:
            continue
        sleep(2)
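# The loop above relies on get_message() and send_messege() helpers that are
# not shown. A minimal sketch against the Telegram Bot HTTP API might look
# like this; the token placeholder and offset bookkeeping are assumptions.
import requests

TOKEN = 'YOUR_BOT_TOKEN'  # placeholder, not a real token
API = 'https://api.telegram.org/bot' + TOKEN + '/'
last_update_id = 0

def get_message():
    # Long-polls Telegram for new updates and returns the dict shape main()
    # expects ('chat_id', 'text', 'message_id'), or None when there is
    # nothing new.
    global last_update_id
    resp = requests.get(API + 'getUpdates',
                        params={'offset': last_update_id + 1, 'timeout': 30}).json()
    results = resp.get('result') or []
    if not results:
        return None
    update = results[-1]
    last_update_id = update['update_id']
    msg = update.get('message')
    if msg is None or 'text' not in msg:
        return None
    return {'chat_id': msg['chat']['id'],
            'text': msg['text'],
            'message_id': msg['message_id']}

def send_messege(chat_id, text):
    # Delivers one text message to a chat via the sendMessage method.
    requests.post(API + 'sendMessage', data={'chat_id': chat_id, 'text': text})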