def most_talkative_state(term):
    """Return the state that has the largest number of tweets containing term.

    >>> most_talkative_state('texas')
    'TX'
    >>> most_talkative_state('sandwich')
    'NJ'
    """
    tweets = load_tweets(make_tweet, term)  # A list of tweets containing term
    # Pre-compute each state's geographic center once, outside the loop.
    us_centers = {n: find_center(s) for n, s in us_states.items()}
    # Tally how many tweets fall closest to each state's center.
    tweets_by_state = {}
    for twit in tweets:
        state = find_closest_state(twit, us_centers)
        # .get collapses the original if/else double-lookup into one line.
        tweets_by_state[state] = tweets_by_state.get(state, 0) + 1
    # Renamed from 'max', which shadowed the builtin of the same name.
    best_count = 0
    most_talkative = ""
    for state, count in tweets_by_state.items():
        if count > best_count:
            best_count = count
            most_talkative = state
    return most_talkative
def most_talkative_state(term):
    """Return the state that has the largest number of tweets containing term.

    >>> most_talkative_state('texas')
    'TX'
    >>> most_talkative_state('sandwich')
    'NJ'
    """
    tweets = load_tweets(make_tweet, term)  # A list of tweets containing term
    tweets_by_state = group_tweets_by_state(tweets)
    # Every state starts at zero so states with no matching tweets still
    # appear in the tally (dict.fromkeys is clearer than the original
    # us_states.fromkeys(us_states, 0) instance-method spelling).
    talkative_states = dict.fromkeys(us_states, 0)
    for state, tweet_list in tweets_by_state.items():
        for tweet in tweet_list:
            # Count every word that exactly matches term.
            for word in tweet_words(tweet):
                if word == term:
                    talkative_states[state] += 1
    best_count = None
    best_state = None
    for state, count in talkative_states.items():
        # 'is None' (not '== None') is the idiomatic first-iteration check;
        # a strict '>' afterwards keeps the first tied state, as before.
        if best_count is None or count > best_count:
            best_count = count
            best_state = state
    return best_state
def most_talkative_states(term):
    """Return a list of the top five states with the largest number of tweets
    containing 'term' in descending order (from most to least). If multiple
    states tie, return them in any order.

    >>> most_talkative_states('texas')
    [('TX', 1541), ('LA', 303), ('OK', 207), ('NM', 55), ('AR', 41)]
    >>> most_talkative_states('soup')
    [('CA', 57), ('NJ', 41), ('OH', 31), ('FL', 26), ('MA', 23)]
    """
    tweets = load_tweets(make_tweet, term)  # A list of tweets containing term
    # Call group_tweets_by_state once (the original called it twice) and
    # drop the large commented-out earlier attempt that was kept inline.
    grouped = group_tweets_by_state(tweets)
    counts = [(state, len(tweet_list)) for state, tweet_list in grouped.items()]
    # Sort alphabetically first, then (stable) by count descending, so tied
    # states come out in alphabetical order.
    counts.sort(key=lambda pair: pair[0])
    counts.sort(key=lambda pair: pair[1], reverse=True)
    return counts[:5]
def most_talkative_states(term):
    """Return a list of the top five states with the largest number of tweets
    containing 'term' in descending order (from most to least). If multiple
    states tie, return them in alphabetical order.

    >>> most_talkative_states('texas')
    [('TX', 1541), ('LA', 303), ('OK', 207), ('NM', 55), ('AR', 41)]
    >>> most_talkative_states('soup')
    [('CA', 57), ('NJ', 41), ('OH', 31), ('FL', 26), ('MA', 23)]
    """
    tweets = load_tweets(make_tweet, term)  # A list of tweets containing term
    tweets_by_state = group_tweets_by_state(tweets)
    # 'AA' appears to be a bucket for unlocatable tweets; exclude it.
    # pop with a default replaces the original membership test + del.
    tweets_by_state.pop("AA", None)
    top_five = []
    for _ in range(5):
        # The original nested helper took a 'tweets' parameter it ignored and
        # closed over tweets_by_state instead; call max directly. max() over
        # a dict iterates keys; ties keep the first key encountered.
        best = max(tweets_by_state, key=lambda state: len(tweets_by_state[state]))
        top_five.append((best, len(tweets_by_state[best])))
        del tweets_by_state[best]  # remove so the next pass finds the runner-up
    return top_five
def most_talkative_states(term):
    """Return a list of the top five states with the largest number of tweets
    containing 'term' in descending order (from most to least). If multiple
    states tie, return them in any order.

    >>> most_talkative_states('texas')
    [('TX', 1541), ('LA', 303), ('OK', 207), ('NM', 55), ('AR', 41)]
    >>> most_talkative_states('soup')
    [('CA', 57), ('NJ', 41), ('OH', 31), ('FL', 26), ('MA', 23)]
    """
    tweets = load_tweets(make_tweet, term)  # A list of tweets containing term
    grouped_tweets = group_tweets_by_state(tweets)
    # Sorting the keys first means tied counts resolve alphabetically.
    counts = [(state, len(grouped_tweets[state])) for state in sorted(grouped_tweets)]
    # Explicit guard replaces the original bare 'except: return None', which
    # silently swallowed every error; it only ever fired when fewer than
    # five states had matching tweets.
    if len(counts) < 5:
        return None
    # One stable O(n log n) sort replaces five repeated max/index scans.
    counts.sort(key=lambda pair: pair[1], reverse=True)
    return counts[:5]
def most_talkative_states(term):
    """Return a list of the top five states with the largest number of tweets
    containing 'term' in descending order (from most to least). If multiple
    states tie, return them in any order.

    >>> most_talkative_states('texas')
    [('TX', 1541), ('LA', 303), ('OK', 207), ('NM', 55), ('AR', 41)]
    >>> most_talkative_states('soup')
    [('CA', 57), ('NJ', 41), ('OH', 31), ('FL', 26), ('MA', 23)]
    """
    tweets = load_tweets(make_tweet, term)  # A list of tweets containing term
    tweets_by_state = group_tweets_by_state(tweets)
    counts = {state: len(ts) for state, ts in tweets_by_state.items()}
    # Count descending, state code ascending for ties — the same ordering the
    # original's repeated scan produced, but this also no longer raises a
    # KeyError when fewer than five states have matching tweets.
    ranked = sorted(counts.items(), key=lambda pair: (-pair[1], pair[0]))
    return ranked[:5]
def most_talkative_state(term):
    """Return the state that has the largest number of tweets containing term.
    If multiple states tie for the most talkative, return any of them.

    >>> most_talkative_state('texas')
    'TX'
    >>> most_talkative_state('soup')
    'CA'
    """
    tweets = load_tweets(make_tweet, term)  # A list of tweets containing term
    tweets_by_state = group_tweets_by_state(tweets)
    # Count, per state, the tweets whose word list contains term.
    state_counts = {}
    for state, state_tweets in tweets_by_state.items():
        count = 0
        for tweet in state_tweets:
            if term in tweet_words(tweet):
                count += 1
        state_counts[state] = count
    # 'is None' replaces the original '== None'; a distinct best_state name
    # avoids reusing the loop variable 'state' for the result.
    best_count = None
    best_state = None
    for state, count in state_counts.items():
        if best_count is None or count > best_count:
            best_count = count
            best_state = state
    return best_state
def most_talkative_state(term):
    """Return the state that has the largest number of tweets containing term.

    >>> most_talkative_state('texas')
    'TX'
    >>> most_talkative_state('sandwich')
    'NJ'
    >>> most_talkative_state('math')
    'CA'
    >>> most_talkative_state('ham')
    'OH'
    >>> most_talkative_state('democrat')
    'CA'
    >>> most_talkative_state('republican')
    'WA'
    >>> most_talkative_state('glee')
    'CA'
    >>> most_talkative_state('python')
    'CA'
    >>> most_talkative_state('internet')
    'TX'
    >>> most_talkative_state('ramen')
    'CA'
    >>> most_talkative_state('dreary')
    'NJ'
    >>> most_talkative_state('taxes')
    'NJ'
    >>> most_talkative_state('bad')
    'NJ'
    >>> most_talkative_state('hate')
    'NJ'
    >>> most_talkative_state('tears')
    'NJ'
    >>> most_talkative_state('despair')
    'NJ'
    >>> most_talkative_state('die')
    'NJ'
    """
    tweets = load_tweets(make_tweet, term)  # A list of tweets containing term
    tweet_state_l = group_tweets_by_state(tweets)
    # Count, per state, the tweets whose word list contains term.
    state_counter = {}
    for state, state_tweets in tweet_state_l.items():
        count = 0
        for tweet in state_tweets:
            if term in tweet_words(tweet):
                count += 1
        state_counter[state] = count
    # 'is None' replaces the original '== None'; a distinct best_state name
    # avoids reusing the loop variable 'state' for the result.
    best_count = None
    best_state = None
    for state, count in state_counter.items():
        if best_count is None or count > best_count:
            best_count = count
            best_state = state
    return best_state
def most_talkative_state(term):
    """Return the state that has the largest number of tweets containing term.

    >>> most_talkative_state('texas')
    'TX'
    >>> most_talkative_state('sandwich')
    'NJ'
    """
    tweets = load_tweets(make_tweet, term)  # A list of tweets containing term
    # Implemented: the original was an unimplemented "YOUR CODE HERE" stub
    # that returned None. The most talkative state is simply the key with
    # the longest tweet list; ties keep the first state in dict order.
    tweets_by_state = group_tweets_by_state(tweets)
    return max(tweets_by_state, key=lambda state: len(tweets_by_state[state]))
def draw_map_by_hour(term='my job', pause=0.5):
    """Draw the sentiment map for tweets that match term, for each hour."""
    matching = load_tweets(make_tweet, term)
    by_hour = group_tweets_by_hour(matching)
    for hour in range(24):
        # Hours with no tweets yield an empty list rather than a KeyError.
        hourly = by_hour.get(hour, [])
        sentiments = average_sentiments(group_tweets_by_state(hourly))
        draw_state_sentiments(sentiments)
        message("{0:02}:00-{0:02}:59".format(hour))
        wait(pause)
def most_talkative_state(term):
    """Return the state that has the largest number of tweets containing term.
    If multiple states tie for the most talkative, return any of them.

    >>> most_talkative_state('texas')
    'TX'
    >>> most_talkative_state('soup')
    'CA'
    """
    tweets = load_tweets(make_tweet, term)  # A list of tweets containing term
    # Implemented: the original was an unimplemented "YOUR CODE HERE" stub
    # that returned None. The most talkative state is the key with the
    # longest tweet list; ties keep the first state in dict order.
    tweets_by_state = group_tweets_by_state(tweets)
    return max(tweets_by_state, key=lambda state: len(tweets_by_state[state]))
def most_talkative_states(term):
    """Return a list of the top five states with the largest number of tweets
    containing 'term' in descending order (from most to least). If multiple
    states tie, return them in alphabetical order.

    >>> most_talkative_states('texas')
    [('TX', 1541), ('LA', 303), ('OK', 207), ('NM', 55), ('AR', 41)]
    >>> most_talkative_states('soup')
    [('CA', 57), ('NJ', 41), ('OH', 31), ('FL', 26), ('MA', 23)]
    """
    tweets = load_tweets(make_tweet, term)  # A list of tweets containing term
    # Implemented: the original was an unimplemented "YOUR CODE HERE" stub
    # that returned None.
    tweets_by_state = group_tweets_by_state(tweets)
    counts = [(state, len(ts)) for state, ts in tweets_by_state.items()]
    # Count descending; state code ascending breaks ties alphabetically,
    # as the docstring requires.
    counts.sort(key=lambda pair: (-pair[1], pair[0]))
    return counts[:5]
def draw_map_for_term(term='my job'):
    """Draw the sentiment map corresponding to the tweets that contain term.

    Some term suggestions:
    New York, Texas, sandwich, my life, justinbieber
    """
    matching = load_tweets(make_tweet, term)
    # Shade each state by the average sentiment of its tweets.
    grouped = group_tweets_by_state(matching)
    draw_state_sentiments(average_sentiments(grouped))
    # Then overlay one dot per tweet at its location.
    for tw in matching:
        draw_dot(tweet_location(tw), analyze_tweet_sentiment(tw))
    wait()
def most_talkative_states(term):
    """Return a list of the top five states with the largest number of tweets
    containing 'term' in descending order (from most to least). If multiple
    states tie, return them in alphabetical order.

    >>> most_talkative_states('texas')
    [('TX', 1541), ('LA', 303), ('OK', 207), ('NM', 55), ('AR', 41)]
    >>> most_talkative_states('soup')
    [('CA', 57), ('NJ', 41), ('OH', 31), ('FL', 26), ('MA', 23)]
    """
    tweets = load_tweets(make_tweet, term)  # A list of tweets containing term
    # The original kept a large commented-out earlier attempt and only
    # re-sorted the candidate list *inside* the replacement branch, so the
    # returned list was not guaranteed to be in descending order (and the
    # fewer-than-six-states path was never sorted at all). Building all
    # counts and sorting once fixes both.
    tweets_by_state = group_tweets_by_state(tweets)
    top_five = [(state, len(ts)) for state, ts in tweets_by_state.items()]
    # Count descending; state code ascending breaks ties alphabetically.
    top_five.sort(key=lambda pair: (-pair[1], pair[0]))
    return top_five[:5]
def most_talkative_state(term):
    """Return the state that has the largest number of tweets containing term.

    >>> most_talkative_state('texas')
    'TX'
    >>> most_talkative_state('soup')
    'CA'
    """
    tweets = load_tweets(make_tweet, term)  # A list of tweets containing term
    group = group_tweets_by_state(tweets)
    # max with a key replaces the original two-pass approach (build a list of
    # lengths, then scan again for the matching state). max also returns the
    # first key with the largest length, matching the original's tie-break.
    return max(group, key=lambda state: len(group[state]))
def draw_map_for_query(term='my job', file_name='tweets2014.txt'):
    """Draw the sentiment map corresponding to the tweets that contain term.

    Some term suggestions:
    New York, Texas, sandwich, my life, justinbieber
    """
    matching = load_tweets(make_tweet, term, file_name)
    # Shade each state by its average tweet sentiment.
    grouped = group_tweets_by_state(matching)
    draw_state_sentiments(average_sentiments(grouped))
    # Overlay a dot per tweet, but only when it has a real sentiment value.
    for tw in matching:
        sentiment = analyze_tweet_sentiment(tw)
        if has_sentiment(sentiment):
            draw_dot(tweet_location(tw), sentiment_value(sentiment))
    wait()
def draw_map_for_term(term='my job'):
    """Draw the sentiment map corresponding to the tweets that contain term.

    Some term suggestions:
    New York, Texas, sandwich, my life, justinbieber
    """
    matching = load_tweets(make_tweet, term)
    # Shade each state by its average tweet sentiment.
    grouped = group_tweets_by_state(matching)
    draw_state_sentiments(average_sentiments(grouped))
    # Overlay a dot per tweet, but only when it has a real sentiment value.
    for tw in matching:
        sentiment = analyze_tweet_sentiment(tw)
        if has_sentiment(sentiment):
            draw_dot(tweet_location(tw), sentiment_value(sentiment))
    wait()
def most_talkative_state(term):
    """Return the state that has the largest number of tweets containing term.

    >>> most_talkative_state('texas')
    'TX'
    >>> most_talkative_state('soup')
    'CA'
    """
    tweets = load_tweets(make_tweet, term)  # A list of tweets containing term
    group_tweets = group_tweets_by_state(tweets)
    # max with a key replaces the original two-pass approach (compute the
    # largest length, then scan the dict again for a matching state). It
    # returns the first key with the largest length — same tie-break.
    return max(group_tweets, key=lambda state: len(group_tweets[state]))
def most_talkative_state(term):
    """Return the state that has the largest number of tweets containing term.

    >>> most_talkative_state('texas')
    'TX'
    >>> most_talkative_state('sandwich')
    'NJ'
    """
    tweets = load_tweets(make_tweet, term)  # A list of tweets containing term
    grouped = group_tweets_by_state(tweets)
    high = 0
    # Defined up front: the original left talkative_state unbound when the
    # grouping was empty, raising NameError at the return.
    talkative_state = None
    for state, state_tweets in grouped.items():
        if len(state_tweets) > high:
            high = len(state_tweets)
            talkative_state = state
    return talkative_state
def ExportToCSV(term=""):
    """
    Create a vPython Graph of the tweets in the US by state, for a given
    subject denoted by "term".
    input 'term': the word to analyze tweet sentiment for.
    returns: Will create a vPython histogram of the sentiments and frequency
    of tweets containing 'term' organized by state.
    """
    if term == "":
        term = input("Enter a term to be graphed by sentiment.\n\n\t>")
    tweets = load_tweets(make_tweet, term)
    tweetDict = group_tweets_by_state(tweets)
    sentDict = average_sentiments(tweetDict)
    # Export one "STATE,sentiment" row per state. The original computed
    # sentDict and then discarded it, so ExportToCSV exported nothing;
    # this mirrors the completed sibling implementation in this file.
    # NOTE(review): output file name has no .csv extension — confirm intent.
    with open('sentDictFile', 'w') as file:
        for key, value in sentDict.items():
            file.write('{0},{1}\n'.format(key, value))
def draw_map_by_hour(find_state, term='my job', pause=0.5, canvas=None, imglist=None):
    """Draw the sentiment map for tweets that match term, for each hour."""
    word_sentiments = load_sentiments()
    tweets = load_tweets(term)
    tweets_by_hour = group_tweets_by_hour(tweets)
    for hour in range(24):
        # .get avoids the original's KeyError for hours with no matching
        # tweets (the other draw_map_by_hour in this file already does this).
        current_tweets = tweets_by_hour.get(hour, [])
        tweets_by_state = group_tweets_by_state(current_tweets, find_state)
        state_sentiments = average_sentiments(tweets_by_state, word_sentiments)
        draw_state_sentiments(state_sentiments, canvas=canvas)
        message("{0:02}:00-{0:02}:59".format(hour), canvas=canvas)
        wait(pause, canvas=canvas)
        if imglist is not None:
            # Capture a frame per hour so the caller can assemble them.
            imglist.append(get_img_copy(canvas))
def get_vectorised_seq(path_to_tweets="temp/tweets.pickle", to_pickle=True):
    """Return (vectorised_seq, vocab): per-day, per-tweet lists of word ids.

    Loads tweets from the pickle at path_to_tweets when it exists, otherwise
    from the data module. Word id 0 is reserved for the "<pad>" token.
    Optionally pickles the result to temp/vectorised_seq.pickle.
    """
    if os.path.exists(path_to_tweets):
        tweets = pd.read_pickle(path_to_tweets)
    else:
        tweets = d.load_tweets()
    vocab = ["<pad>"] + d.get_all_words(tweets)
    # vocab.index(word) is O(len(vocab)) per call, making the triple loop
    # quadratic; a one-time dict makes every lookup O(1). setdefault keeps
    # the FIRST index of any duplicate word, matching list.index behavior.
    # (The original also built/filtered an unused list_all_tweets — removed.)
    word_to_id = {}
    for i, word in enumerate(vocab):
        word_to_id.setdefault(word, i)
    vectorised_seq = [
        [[word_to_id[word] for word in tweet] for tweet in daily_tweets]
        for daily_tweets in tweets
    ]
    if to_pickle:
        with open("temp/vectorised_seq.pickle", "wb") as f:
            pickle.dump(vectorised_seq, f)
    return vectorised_seq, vocab
def most_talkative_state(term):
    """Return the state that has the largest number of tweets containing term.

    >>> most_talkative_state('texas')
    'TX'
    >>> most_talkative_state('soup')
    'CA'
    """
    tweets = load_tweets(make_tweet, term)  # A list of tweets containing term
    by_state = group_tweets_by_state(tweets)
    best_count, best_state = 0, None
    # Strictly-greater comparison: the first tied state is kept.
    for abbrev, state_tweets in by_state.items():
        size = len(state_tweets)
        if size > best_count:
            best_count, best_state = size, abbrev
    return best_state
def most_talkative_state(term):
    """Return the state that has the largest number of tweets containing term.

    >>> most_talkative_state('texas')
    'TX'
    >>> most_talkative_state('sandwich')
    'NJ'
    """
    result = ''
    tweets = load_tweets(make_tweet, term)  # A list of tweets containing term
    by_state = group_tweets_by_state(tweets)
    best_so_far = 0
    # Keep the first state whose tweet count strictly beats the running max.
    for abbrev in by_state:
        count = len(by_state[abbrev])
        if count > best_so_far:
            best_so_far, result = count, abbrev
    return result
def draw_map_for_term(find_state, term='my job', canvas=None):
    """Draw the sentiment map corresponding to the tweets that contain term.

    Some term suggestions:
    New York, Texas, sandwich, my life, justinbieber
    """
    word_sentiments = load_sentiments()
    tweets = load_tweets(term)
    tweets_by_state = group_tweets_by_state(tweets, find_state)
    state_sentiments = average_sentiments(tweets_by_state, word_sentiments)
    draw_state_sentiments(state_sentiments, canvas=canvas)
    for tweet in tweets:
        s = tweet.get_sentiment(word_sentiments)
        # 'is not None' replaces the original 's != None'; identity is the
        # idiomatic (and correct) None check.
        if s is not None:
            draw_dot(tweet.get_location(), s, canvas=canvas)
    wait(canvas=canvas)
def run(*args):
    """Read command-line arguments and calls corresponding functions."""
    import argparse
    parser = argparse.ArgumentParser(description="Run Trends")
    # Boolean feature flags; each True flag triggers the matching action below.
    parser.add_argument('--print_sentiment', '-p', action='store_true')
    parser.add_argument('--run_doctests', '-t', action='store_true')
    parser.add_argument('--draw_centered_map', '-d', action='store_true')
    parser.add_argument('--draw_state_sentiments', '-s', action='store_true')
    parser.add_argument('--draw_map_for_term', '-m', action='store_true')
    parser.add_argument('--draw_map_by_hour', '-b', action='store_true')
    parser.add_argument('--containing_state', '-c', action='store_true')
    # Output file stem; when given, drawings are also saved as PNGs.
    parser.add_argument('--file', '-f', type=str, default=None)
    parser.add_argument('text', metavar='T', type=str, nargs='*', help='Text to process')
    args = parser.parse_args()
    # Choose the state-resolution strategy: polygon containment vs nearest center.
    if (args.__dict__['containing_state']):
        find_state = find_containing_state(us_states)
    else:
        us_centers = {n: find_center(s) for n, s in us_states.items()}
        find_state = find_closest_state(us_centers)
    # A canvas is only created when output to file was requested.
    if args.__dict__['file']:
        canvas = MapImage(960,500)
    else:
        canvas = None
    # Dispatch every flag that was set; 'text'/'containing_state'/'file' are
    # configuration, not actions, so they are skipped.
    for name, execute in args.__dict__.items():
        if name != 'text' and name != 'containing_state' and name != 'file' and execute:
            if name == 'draw_map_for_term':
                draw_map_for_term(find_state, ' '.join(args.text), canvas=canvas)
                if canvas:
                    canvas._img.save(args.file+'.png', "PNG")
            elif name == 'draw_map_by_hour':
                # Collect one frame per hour only when saving to file.
                imglist = [] if canvas else None
                draw_map_by_hour(find_state, ' '.join(args.text), canvas=canvas, imglist=imglist)
                if canvas:
                    for i in range(24):
                        imglist[i].save(args.file+'_'+str(i).zfill(2)+'.png', "PNG")
            elif name == 'draw_centered_map':
                draw_centered_map(' '.join(args.text), canvas=canvas)
                if canvas:
                    canvas._img.save(args.file+'.png', "PNG")
            elif name == 'draw_state_sentiments':
                draw_state_sentiments(average_sentiments(group_tweets_by_state(load_tweets(' '.join(args.text)), find_state),load_sentiments()), canvas=canvas)
                wait(canvas=canvas)
                if canvas:
                    canvas._img.save(args.file+'.png', "PNG")
            else:
                # Remaining flags (e.g. print_sentiment, run_doctests) call the
                # module-level function of the same name with the joined text.
                globals()[name](' '.join(args.text))
def most_talkative_state(term):
    """Return the state that has the largest number of tweets containing term.

    >>> most_talkative_state('texas')
    'TX'
    >>> most_talkative_state('sandwich')
    'NJ'
    """
    tweets = load_tweets(make_tweet, term)  # A list of tweets containing term
    by_state = group_tweets_by_state(tweets)
    best_count, best_state = 0, ''
    # A strictly-greater test keeps the first tied state encountered.
    for abbrev, state_tweets in by_state.items():
        if len(state_tweets) > best_count:
            best_count = len(state_tweets)
            best_state = abbrev
    return best_state
def most_talkative_state(term):
    """Return the state that has the largest number of tweets containing term.

    >>> most_talkative_state('texas')
    'TX'
    >>> most_talkative_state('soup')
    'CA'
    """
    tweets = load_tweets(make_tweet, term)  # A list of tweets containing term
    compare = group_tweets_by_state(tweets)
    base = 0
    # Defined up front: the original left 'winner' unbound (NameError) when
    # the grouping was empty. The intermediate 'new' counts dict was also
    # redundant — the length can be compared directly.
    winner = None
    for state in compare:
        count = len(compare[state])
        if count > base:
            base = count
            winner = state
    return winner
def most_talkative_state(term):
    """Return the state that has the largest number of tweets containing term.

    >>> most_talkative_state('texas')
    'TX'
    >>> most_talkative_state('soup')
    'CA'
    """
    tweets = load_tweets(make_tweet, term)  # A list of tweets containing term
    grouped_tweets = group_tweets_by_state(tweets)
    best_size = 0
    # Renamed: the original result variable shadowed the function's own name
    # and was unbound (UnboundLocalError) when the grouping was empty.
    best_state = None
    for state, state_tweets in grouped_tweets.items():
        if len(state_tweets) > best_size:
            best_size = len(state_tweets)
            best_state = state
    return best_state
def most_talkative_state(term):
    """Return the state that has the largest number of tweets containing term.

    >>> most_talkative_state('texas')
    'TX'
    >>> most_talkative_state('sandwich')
    'NJ'
    """
    tweets = load_tweets(make_tweet, term)  # A list of tweets containing term
    tweets_agrupados = group_tweets_by_state(tweets)
    maior = 0
    # Defined up front: the original left 'estado' unbound (NameError) when
    # the grouping was empty.
    estado = None
    for chave in tweets_agrupados:
        # '>=' preserved from the original: the LAST tied state wins.
        if len(tweets_agrupados[chave]) >= maior:
            maior = len(tweets_agrupados[chave])
            estado = chave
    return estado
def most_talkative_state(term):
    """Return the state that has the largest number of tweets containing term.

    >>> most_talkative_state('texas')
    'TX'
    >>> most_talkative_state('sandwich')
    'NJ'
    """
    tweets = load_tweets(make_tweet, term)  # A list of tweets containing term
    grouped_tweets = group_tweets_by_state(tweets)
    number_of_tweets = 0  # largest tweet count seen so far
    # The original declared a frequency_of_tweets dict that was never used —
    # removed. most_voluble_state is also initialized so an empty grouping
    # can no longer raise NameError at the return.
    most_voluble_state = None  # State that talks the most
    for state in grouped_tweets:
        frequency = len(grouped_tweets[state])
        if frequency > number_of_tweets:
            number_of_tweets = frequency
            most_voluble_state = state
    return most_voluble_state
def most_talkative_state(term):
    """Return the state that has the largest number of tweets containing term.

    >>> most_talkative_state('texas')
    'TX'
    >>> most_talkative_state('sandwich')
    'NJ'
    """
    tweets = load_tweets(make_tweet, term)  # A list of tweets containing term
    grouped = group_tweets_by_state(tweets)
    winner, best = 0, 0
    # Strictly-greater comparison keeps the first tied state encountered.
    for state in grouped:
        size = len(grouped[state])
        if size > best:
            best, winner = size, state
    return winner
def most_talkative_state(term):
    """Return the state that has the largest number of tweets containing term.

    >>> most_talkative_state('texas')
    'TX'
    >>> most_talkative_state('sandwich')
    'NJ'
    """
    tweets = load_tweets(make_tweet, term)  # A list of tweets containing term
    grouped_tweets = group_tweets_by_state(tweets)
    talkative_state, num_terms = '', 0
    # Scan every state, keeping whichever has the largest tweet list; the
    # strictly-greater comparison means the first tied state is kept.
    for state, state_tweets in grouped_tweets.items():
        size = len(state_tweets)
        if size > num_terms:
            num_terms, talkative_state = size, state
    return talkative_state
def most_talkative_state(term):
    """Return the state that has the largest number of tweets containing term.

    >>> most_talkative_state('texas')
    'TX'
    >>> most_talkative_state('sandwich')
    'NJ'
    """
    tweets = load_tweets(make_tweet, term)  # A list of tweets containing term
    by_state = group_tweets_by_state(tweets)  # states mapped to matching tweets
    best_count = 0
    best_state = ""
    # Keep the first state whose tweet count strictly beats the running max.
    for abbrev in by_state:
        size = len(by_state[abbrev])
        if size > best_count:
            best_count = size
            best_state = abbrev
    return best_state
def most_talkative_state(term):
    """Return the state that has the largest number of tweets containing term.

    >>> most_talkative_state('texas')
    'TX'
    >>> most_talkative_state('sandwich')
    'NJ'
    """
    tweets = load_tweets(make_tweet, term)  # A list of tweets containing term
    # The original computed a state_centers dict that was never used — removed.
    tweets_by_state = group_tweets_by_state(tweets)
    most_tweets = 0
    most_talkative = ''  # state code with the most matching tweets so far
    for state, state_tweets in tweets_by_state.items():
        # Strictly-greater test: the first tied state encountered is kept.
        if len(state_tweets) > most_tweets:
            most_tweets, most_talkative = len(state_tweets), state
    return most_talkative
def most_talkative_state(term):
    """Return the state that has the largest number of tweets containing term.

    >>> most_talkative_state('texas')
    'TX'
    >>> most_talkative_state('sandwich')
    'NJ'
    """
    tweets = load_tweets(make_tweet, term)  # A list of tweets containing term
    # Maps each state code to the list of its tweets,
    # e.g. {'AK': (tweet, tweet, tweet), 'AL': (tweet, tweet), ...}
    state_dict = group_tweets_by_state(tweets)
    best_size, best_state = 0, ''
    # Find the state with the most items; first tied state wins.
    for abbrev, state_tweets in state_dict.items():
        if len(state_tweets) > best_size:
            best_size = len(state_tweets)
            best_state = abbrev
    return best_state
def most_talkative_state(term):
    """Return the state that has the largest number of tweets containing term.

    >>> most_talkative_state('texas')
    'TX'
    >>> most_talkative_state('sandwich')
    'NJ'
    """
    tweets = load_tweets(make_tweet, term)  # A list of tweets containing term
    grupos = group_tweets_by_state(tweets)
    # BUG FIX: the original seeded the search with [grupos.keys()][0], which
    # is the dict_keys VIEW itself (a one-element list indexed at 0), not a
    # state code — using it as a key raised at runtime. It also shadowed
    # 'tweets' inside the loop. max with a key does the whole scan correctly
    # and keeps the first state with the largest tweet list.
    return max(grupos, key=lambda state: len(grupos[state]))
def most_talkative_states(term):
    """Return a list of the top five states with the largest number of tweets
    containing 'term' in descending order (from most to least). If multiple
    states tie, return them in alphabetical order.

    >>> most_talkative_states('texas')
    [('TX', 1541), ('LA', 303), ('OK', 207), ('NM', 55), ('AR', 41)]
    >>> most_talkative_states('soup')
    [('CA', 57), ('NJ', 41), ('OH', 31), ('FL', 26), ('MA', 23)]
    """
    tweets = load_tweets(make_tweet, term)  # A list of tweets containing term
    tweets_by_state = group_tweets_by_state(tweets)
    counts = [(state, len(ts)) for state, ts in tweets_by_state.items()]
    # One composite-key sort: count descending, then state code ascending —
    # so tied states come out in alphabetical order.
    counts.sort(key=lambda pair: (-pair[1], pair[0]))
    return counts[:5]
def most_talkative_state(term):
    """Return the state that has the largest number of tweets containing term.

    >>> most_talkative_state('texas')
    'TX'
    >>> most_talkative_state('sandwich')
    'NJ'
    """
    tweets = load_tweets(make_tweet, term)  # A list of tweets containing term
    by_state = group_tweets_by_state(tweets)
    best_count = 0
    best_state = ''
    # Keep the first state whose tweet count strictly beats the running max.
    for abbrev, state_tweets in by_state.items():
        size = len(state_tweets)
        if size > best_count:
            best_count = size
            best_state = abbrev
    return best_state
def most_talkative_state(term):
    """Return the state that has the largest number of tweets containing term.

    >>> most_talkative_state('texas')
    'TX'
    >>> most_talkative_state('sandwich')
    'NJ'
    """
    tweets = load_tweets(make_tweet, term)  # A list of tweets containing term
    tweets_by_state = group_tweets_by_state(tweets)
    # Initialized to None: the original's first_time flag left most_talkative
    # unbound (NameError) when the grouping was empty, and compared the first
    # key against itself. Strict '>' keeps the first state among ties.
    most_talkative = None
    for key in tweets_by_state:
        if most_talkative is None or \
                len(tweets_by_state[key]) > len(tweets_by_state[most_talkative]):
            most_talkative = key
    return most_talkative
def most_talkative_state(term):  # Problema 10
    """Return the state that has the largest number of tweets containing term.

    >>> most_talkative_state('texas')
    'TX'
    >>> most_talkative_state('sandwich')
    'NJ'
    """
    tweets = load_tweets(make_tweet, term)  # A list of tweets containing term
    by_state = group_tweets_by_state(tweets)
    # record holds [state, tweet_count] for the current leader.
    record = []
    for state in by_state:
        size = len(by_state[state])
        # The first state seeds the record; afterwards only a strictly
        # larger count replaces it (ties keep the earlier state).
        if not record or size > record[1]:
            record = [state, size]
    return record[0]
def most_talkative_state(term):
    """Return the state that has the largest number of tweets containing term.

    >>> most_talkative_state('texas')
    'TX'
    >>> most_talkative_state('sandwich')
    'NJ'
    """
    tweets = load_tweets(make_tweet, term)  # A list of tweets containing term
    by_state = group_tweets_by_state(tweets)
    best_count = 0
    best_state = "nenhum"
    # Keep the first state whose tweet count strictly beats the running max.
    for state, state_tweets in by_state.items():
        if len(state_tweets) > best_count:
            best_count = len(state_tweets)
            best_state = state
    return best_state
def draw_map_for_term(term='Berkeley'):
    """
    Draw the sentiment map corresponding to the tweets that match term.

    term -- a word or phrase to filter the tweets by.

    To visualize tweets containing the word "obama":

    # python3 trends.py obama

    Some term suggestions:
    New York, Texas, sandwich, my life, justinbieber
    """
    matching = load_tweets(make_tweet, term)
    # Shade each state by the average sentiment of its tweets.
    grouped = group_tweets_by_state(matching)
    draw_state_sentiments(calculate_average_sentiments(grouped))
    # Then overlay one dot per tweet at its location.
    for tw in matching:
        draw_dot(tweet_location(tw), analyze_tweet_sentiment(tw))
    wait()
def most_talkative_states(term):
    """Return a list of the top five states with the largest number of tweets
    containing 'term' in descending order (from most to least). If multiple
    states tie, return them in alphabetical order.

    >>> most_talkative_states('texas')
    [('TX', 1541), ('LA', 303), ('OK', 207), ('NM', 55), ('AR', 41)]
    >>> most_talkative_states('soup')
    [('CA', 57), ('NJ', 41), ('OH', 31), ('FL', 26), ('MA', 23)]
    """
    tweets = load_tweets(make_tweet, term)  # A list of tweets containing term
    grouped = group_tweets_by_state(tweets)
    counts = {state: len(ts) for state, ts in grouped.items()}
    # Composite key: count descending, then state ascending — so tied states
    # come out in alphabetical order, matching the docstring.
    ranked = sorted(counts.items(), key=lambda pair: (-pair[1], pair[0]))
    return ranked[:5]
def most_talkative_state(term):
    """Return the state that has the largest number of tweets containing term.

    >>> most_talkative_state('texas')
    'TX'
    >>> most_talkative_state('sandwich')
    'NJ'
    """
    tweets = load_tweets(make_tweet, term)  # A list of tweets containing term
    by_state = group_tweets_by_state(tweets)
    best_count = 0
    best_state = ''
    # Return the abbreviation of the state that mentioned the term the most;
    # the strictly-greater test keeps the first tied state encountered.
    for abbrev in by_state:
        size = len(by_state[abbrev])
        if size > best_count:
            best_count = size
            best_state = abbrev
    return best_state
def ExportToCSV(term=""):
    """
    Create a vPython Graph of the tweets in the US by state, for a given
    subject denoted by "term".
    input 'term': the word to analyze tweet sentiment for.
    returns: Will create a vPython histogram of the sentiments and frequency
    of tweets containing 'term' organized by state.
    """
    if term == "":
        term = input("Enter a term to be graphed by sentiment.\n\n\t>")
    tweets = load_tweets(make_tweet, term)
    tweetDict = group_tweets_by_state(tweets)
    sentDict = average_sentiments(tweetDict)
    # Write one "STATE,sentiment" row per state. A plain for loop replaces
    # the original list comprehension, which was evaluated only for its
    # write() side effects and built a throwaway list of return values.
    # NOTE(review): output file name has no .csv extension — confirm intent.
    with open('sentDictFile', 'w') as file:
        for key, value in sentDict.items():
            file.write('{0},{1}\n'.format(key, value))
def most_talkative_state(term):
    """Return the state that has the largest number of tweets containing term.

    >>> most_talkative_state('texas')
    'TX'
    >>> most_talkative_state('sandwich')
    'NJ'
    """
    tweets = load_tweets(make_tweet, term)  # A list of tweets containing term
    tweets_by_state = group_tweets_by_state(tweets)
    # The original seeded the search with tweets_by_state['HI'], which raised
    # a KeyError whenever Hawaii had no matching tweets. Starting from zero
    # yields the same answer whenever the original succeeded, since the loop
    # visits every state anyway. '>=' is kept from the original, so the LAST
    # state in iteration order wins ties.
    talkative = None
    max_tweet = 0
    for state in tweets_by_state:
        state_num = len(tweets_by_state[state])
        if state_num >= max_tweet:
            talkative = state
            max_tweet = state_num
    return talkative
def most_talkative_state(term):
    """Return the state that has the largest number of tweets containing term.

    >>> most_talkative_state('texas')
    'TX'
    >>> most_talkative_state('sandwich')
    'NJ'
    """
    tweets = load_tweets(make_tweet, term)  # A list of tweets containing term
    tweets_per_state = group_tweets_by_state(tweets)
    # One max() with a key replaces the original's intermediate list of
    # [count, state] pairs and its second scan. Like the original's strict
    # '<' comparison, max keeps the first state among ties.
    return max(tweets_per_state, key=lambda state: len(tweets_per_state[state]))
def most_talkative_state(term):
    """Return the state that has the largest number of tweets containing term.

    >>> most_talkative_state('texas')
    'TX'
    >>> most_talkative_state('sandwich')
    'NJ'
    """
    tweets = load_tweets(make_tweet, term)  # A list of tweets containing term
    # Map each state to the list of tweets closest to its center, then
    # find the state whose list is longest.  Strict '>' means the first
    # state reaching the maximum wins ties.
    tweets_by_state = group_tweets_by_state(tweets)
    best_count = 0
    for state, state_tweets in tweets_by_state.items():
        n_tweets = len(state_tweets)
        if n_tweets > best_count:
            best_count = n_tweets
            winner = state
    return winner
def most_talkative_state(term):
    """Return the state that has the largest number of tweets containing term.

    >>> most_talkative_state('texas')
    'TX'
    >>> most_talkative_state('soup')
    'CA'
    """
    tweets = load_tweets(make_tweet, term)  # A list of tweets containing term
    tweets_by_state = group_tweets_by_state(tweets)
    most = 0
    # For each state, count only the tweets whose word list actually
    # contains the term, then keep the state with the highest count
    # (strict '>' — the first state hitting the maximum wins ties).
    for state, state_tweets in tweets_by_state.items():
        matches = sum(1 for tweet in state_tweets if term in tweet_words(tweet))
        if matches > most:
            most = matches
            most_talkative = state
    return most_talkative
def most_talkative_state(term):
    """Return a (state, count) tuple for the state with the largest number
    of tweets containing the term.

    If multiple states tie for the most talkative, return any one of them.
    For testing, use terms of 'texas', 'soup', 'sandwich' and 'obama'.

    >>> most_talkative_state('texas')
    ('TX', 1541)
    >>> most_talkative_state('soup')
    ('CA', 57)
    """
    tweets = load_tweets(make_tweet, term)  # A list of tweets containing term
    grouped = group_tweets_by_state(tweets)
    # 'UK' / 0 are the fallbacks when no tweets map to any state.
    winner, best = 'UK', 0
    for abbrev, state_tweets in grouped.items():
        if len(state_tweets) > best:
            winner, best = abbrev, len(state_tweets)
    return (winner, best)
def most_talkative_state(term):
    """Return the state that has the largest number of tweets containing term.

    >>> most_talkative_state('texas')
    'TX'
    >>> most_talkative_state('sandwich')
    'NJ'
    """
    tweets = load_tweets(make_tweet, term)  # A list of tweets containing term
    tweets_by_state = group_tweets_by_state(tweets)
    # The original also computed every state's center via find_center even
    # though the centers were never used; that dead (and expensive) work
    # is dropped.  Count tweets per state in us_states order so that ties
    # still resolve to the first state listed there, exactly as the
    # original index-by-index scan did.
    counts = {state: len(tweets_by_state[state])
              for state in us_states if state in tweets_by_state}
    # max() returns the first key attaining the maximum count.
    return max(counts, key=counts.get)
def most_talkative_state(term):  # problema10
    """Return the state that has the largest number of tweets containing term.

    Returns None if no tweet could be assigned to any state.

    >>> most_talkative_state('texas')
    'TX'
    >>> most_talkative_state('sandwich')
    'NJ'
    """
    tweets = load_tweets(make_tweet, term)  # A list of tweets containing term
    all_tweets = group_tweets_by_state(tweets)  # dict: state -> its tweets
    # BUG FIX: the original did one bubble-sort pass over (state, tweets)
    # pairs and, in its else branch, assigned the whole tuple — not the
    # state code — to the result, so it could return ('NJ', [...]) instead
    # of 'NJ'.  A direct maximum scan over the keys always yields the
    # two-letter code.
    best_state = None
    best_count = -1
    for state, state_tweets in all_tweets.items():
        if len(state_tweets) > best_count:
            best_state = state
            best_count = len(state_tweets)
    return best_state
'NJ' """ tweets = load_tweets(make_tweet, term) # A list of tweets containing term state_centers = {n: find_center(s) for n, s in us_states.items()} tgbs = group_tweets_by_state( tweets ) # Stands for "Tweets, Grouped By State" for shortening purposes :P most_tweets = 0 swmt = '' # "State with Most Tweets" for states in tgbs: if len(tgbs[states]) > most_tweets: most_tweets, swmt = len(tgbs[states]), states return swmt tweetswithtexas = group_tweets_by_state(load_tweets( make_tweet, 'texas')) # For testing the function out tweetswithobama = group_tweets_by_state(load_tweets( make_tweet, 'obama')) # Also for testing def average_sentiments(tweets_by_state): """Calculate the average sentiment of the states by averaging over all the tweets from each state. Return the result as a dictionary from state names to average sentiment values (numbers). If a state has no tweets with sentiment values, leave it out of the dictionary entirely. Do NOT include states with no tweets, or with tweets that have no sentiment, as 0. 0 represents neutral sentiment, not unknown sentiment. tweets_by_state -- A dictionary from state names to lists of tweets