def classify_text():
    """Rank cities for the POSTed tweet and render the results map.

    Reads ``tweet`` from the submitted form, ranks candidate cities for it,
    extracts the top-5 feature words for the best-ranked city, and renders
    ``map.html``.  The timing ``print`` calls are profiling instrumentation
    left in deliberately.

    Returns:
        The rendered ``map.html`` response.
    """
    tweet = request.form['tweet']

    start = datetime.datetime.now()
    rankings = data.create_ranking(tweet)
    end = datetime.datetime.now()
    print('getting city rankings takes: %s' % (end - start))

    start = datetime.datetime.now()
    # rankings[0][0] is the best-ranked city object (presumably a City —
    # TODO confirm against data.create_ranking).
    top_5_words = feature_selection.top_words_in_tweet(rankings[0][0], tweet)
    end = datetime.datetime.now()
    print('getting top 5 words takes: %s' % (end - start))

    # NOTE(review): word_count_dict is never used below — the timing label
    # even calls it "bogus".  Kept so the profiling output is unchanged;
    # this whole section is a candidate for deletion.
    start = datetime.datetime.now()
    cty_corpus_dict = data.city_corpus_dict()
    word_count_dict = cty_corpus_dict[rankings[0][0].name]
    end = datetime.datetime.now()
    print('getting bogus word count dict takes: %s' % (end - start))

    start = datetime.datetime.now()
    # list() replaces the original element-by-element append loop.
    final_result = list(top_5_words)
    # Each ranking entry is a (city, score)-style pair; collect the names
    # in ranked order.
    names = [ranked[0].name for ranked in rankings]
    end = datetime.datetime.now()
    print('generating lists takes: %s' % (end - start))

    return render_template("map.html",
                           tweet=tweet,
                           names=names,
                           rankings=rankings,
                           final_result=final_result)
def classify_text():
    """Rank cities for the POSTed tweet and render the results map.

    Reads ``tweet`` from the submitted form, ranks candidate cities,
    gathers per-city corpus statistics and matching feature strings, and
    renders ``map.html``.  The timing ``print`` calls are profiling
    instrumentation left in deliberately.

    Returns:
        The rendered ``map.html`` response.
    """
    tweet = request.form['tweet']

    start = datetime.datetime.now()
    rankings = data.create_ranking(tweet)
    end = datetime.datetime.now()
    print('getting city rankings takes: %s' % (end - start))

    start = datetime.datetime.now()
    # Per-city statistics, keyed by city name:
    #   corpus length, tweet count, and the tweet's feature strings that
    #   appear in that city's corpus.
    feature_strings_dict = {}
    city_corpus_leng_dict = {}
    city_tweet_count_dict = {}
    for city in cities:
        city_corpus_leng_dict[city.name] = data.find_leng_city_corpus(city)
        city_tweet_count_dict[city.name] = data.create_region_tweet_count(city)
        feature_strings_dict[city.name] = (
            feature_selection.included_feature_strings(city, tweet))
    end = datetime.datetime.now()
    print('getting top 5 words takes: %s' % (end - start))

    # NOTE(review): word_count_dict is never used below — the timing label
    # even calls it "bogus".  Kept so the profiling output is unchanged;
    # this whole section is a candidate for deletion.
    start = datetime.datetime.now()
    cty_corpus_dict = data.city_corpus_dict()
    word_count_dict = cty_corpus_dict[rankings[0][0].name]
    end = datetime.datetime.now()
    print('getting bogus word count dict takes: %s' % (end - start))

    start = datetime.datetime.now()
    # Each ranking entry is a (city, score)-style pair; collect the names
    # in ranked order.
    names = [ranked[0].name for ranked in rankings]
    end = datetime.datetime.now()
    print('generating lists takes: %s' % (end - start))

    return render_template("map.html",
                           tweet=tweet,
                           city_tweet_count_dict=city_tweet_count_dict,
                           names=names,
                           city_corpus_leng_dict=city_corpus_leng_dict,
                           feature_strings_dict=feature_strings_dict,
                           rankings=rankings)
def classify_text():
    """Rank cities for the POSTed tweet and render the results map.

    NOTE(review): this is a duplicate of the preceding ``classify_text``
    definition; at import time only the last binding survives.  The
    duplicates should be reconciled into a single definition.

    Reads ``tweet`` from the submitted form, ranks candidate cities,
    gathers per-city corpus statistics and matching feature strings, and
    renders ``map.html``.  The timing ``print`` calls are profiling
    instrumentation left in deliberately.

    Returns:
        The rendered ``map.html`` response.
    """
    tweet = request.form['tweet']

    start = datetime.datetime.now()
    rankings = data.create_ranking(tweet)
    end = datetime.datetime.now()
    print('getting city rankings takes: %s' % (end - start))

    start = datetime.datetime.now()
    # Per-city statistics, keyed by city name:
    #   corpus length, tweet count, and the tweet's feature strings that
    #   appear in that city's corpus.
    feature_strings_dict = {}
    city_corpus_leng_dict = {}
    city_tweet_count_dict = {}
    for city in cities:
        city_corpus_leng_dict[city.name] = data.find_leng_city_corpus(city)
        city_tweet_count_dict[city.name] = data.create_region_tweet_count(city)
        feature_strings_dict[city.name] = (
            feature_selection.included_feature_strings(city, tweet))
    end = datetime.datetime.now()
    print('getting top 5 words takes: %s' % (end - start))

    # NOTE(review): word_count_dict is never used below — the timing label
    # even calls it "bogus".  Kept so the profiling output is unchanged;
    # this whole section is a candidate for deletion.
    start = datetime.datetime.now()
    cty_corpus_dict = data.city_corpus_dict()
    word_count_dict = cty_corpus_dict[rankings[0][0].name]
    end = datetime.datetime.now()
    print('getting bogus word count dict takes: %s' % (end - start))

    start = datetime.datetime.now()
    # Each ranking entry is a (city, score)-style pair; collect the names
    # in ranked order.
    names = [ranked[0].name for ranked in rankings]
    end = datetime.datetime.now()
    print('generating lists takes: %s' % (end - start))

    return render_template("map.html",
                           tweet=tweet,
                           city_tweet_count_dict=city_tweet_count_dict,
                           names=names,
                           city_corpus_leng_dict=city_corpus_leng_dict,
                           feature_strings_dict=feature_strings_dict,
                           rankings=rankings)