"""Visualizing Twitter Sentiment Across America"""
from data import word_sentiments, load_tweets
from datetime import datetime
from doctest import run_docstring_examples
from geo import us_states, geo_distance, make_position, longitude, latitude
from maps import draw_state, draw_name, draw_dot, wait, message
from string import ascii_letters
from ucb import main, trace, interact, log_current_line
import operator
# Phase 1: The Feelings in Tweets
def make_tweet(text, time, lat, lon):
"""Return a tweet, represented as a python dictionary.
text -- A string; the text of the tweet, all in lowercase
time -- A datetime object; the time that the tweet was posted
latitude -- A number; the latitude of the tweet's location
longitude -- A number; the longitude of the tweet's location
>>> t = make_tweet("just ate lunch", datetime(2012, 9, 24, 13), 38, 74)
>>> tweet_words(t)
['just', 'ate', 'lunch']
>>> tweet_time(t)
datetime.datetime(2012, 9, 24, 13, 0)
>>> p = tweet_location(t)
>>> latitude(p)
38
"""
return {'text': text, 'time': time, 'latitude': lat, 'longitude': lon}
def tweet_words(tweet):
"""Return a list of the words in the text of a tweet."""
return extract_words(tweet['text'])
def tweet_time(tweet):
"""Return the datetime that represents when the tweet was posted."""
return tweet['time']
def tweet_location(tweet):
"""Return a position (see geo.py) that represents the tweet's location."""
return make_position(tweet['latitude'], tweet['longitude'])
def tweet_string(tweet):
"""Return a string representing the tweet."""
return '"{0}" @ {1}'.format(tweet['text'], tweet_location(tweet))
def extract_words(text):
"""Return the words in a tweet, not including punctuation.
>>> extract_words('anything else.....not my job')
['anything', 'else', 'not', 'my', 'job']
>>> extract_words('i love my job. #winning')
['i', 'love', 'my', 'job', 'winning']
>>> extract_words('make justin # 1 by tweeting #vma #justinbieber :)')
['make', 'justin', 'by', 'tweeting', 'vma', 'justinbieber']
>>> extract_words("paperclips! they're so awesome, cool, & useful!")
['paperclips', 'they', 're', 'so', 'awesome', 'cool', 'useful']
"""
    # Replace every character that is not a letter with a space, then split
    # the result on whitespace to obtain the list of words.
    letters_only = ''.join(c if c in ascii_letters else ' ' for c in text)
    return letters_only.split()
def make_sentiment(value):
"""Return a sentiment, which represents a value that may not exist.
>>> s = make_sentiment(0.2)
>>> t = make_sentiment(None)
>>> has_sentiment(s)
True
>>> has_sentiment(t)
False
>>> sentiment_value(s)
0.2
"""
assert value is None or (value >= -1 and value <= 1), 'Illegal value'
return value
def has_sentiment(s):
"""Return whether sentiment s has a value."""
    # A sentiment value of exactly -1 or 1 is legal (see make_sentiment), so
    # the only question is whether a value is present at all.
    return s is not None
def sentiment_value(s):
"""Return the value of a sentiment s."""
assert has_sentiment(s), 'No sentiment value'
return s
def get_word_sentiment(word):
"""Return a sentiment representing the degree of positive or negative
feeling in the given word, if word is not in the sentiment dictionary.
>>> sentiment_value(get_word_sentiment('good'))
0.875
>>> sentiment_value(get_word_sentiment('bad'))
-0.625
>>> sentiment_value(get_word_sentiment('winning'))
0.5
>>> has_sentiment(get_word_sentiment('Berkeley'))
False
"""
return make_sentiment(word_sentiments.get(word, None))
def analyze_tweet_sentiment(tweet):
""" Return a sentiment representing the degree of positive or negative
sentiment in the given tweet, averaging over all the words in the tweet
that have a sentiment value.
If no words in the tweet have a sentiment value, return
make_sentiment(None).
>>> positive = make_tweet('i love my job. #winning', None, 0, 0)
>>> round(sentiment_value(analyze_tweet_sentiment(positive)), 5)
0.29167
>>> negative = make_tweet("Thinking, 'I hate my job'", None, 0, 0)
>>> sentiment_value(analyze_tweet_sentiment(negative))
-0.25
>>> no_sentiment = make_tweet("Go bears!", None, 0, 0)
>>> has_sentiment(analyze_tweet_sentiment(no_sentiment))
False
"""
    # Average the sentiment values of only those words that have one.
    total_sentiment, counter = 0, 0
    for word in tweet_words(tweet):
        word_sentiment = get_word_sentiment(word)
        if has_sentiment(word_sentiment):
            total_sentiment += sentiment_value(word_sentiment)
            counter += 1
    if counter > 0:
        return make_sentiment(total_sentiment / counter)
    return make_sentiment(None)
# Phase 2: The Geometry of Maps
def find_centroid(polygon):
"""Find the centroid of a polygon.
http://en.wikipedia.org/wiki/Centroid#Centroid_of_polygon
polygon -- A list of positions, in which the first and last are the same
Returns: 3 numbers; centroid latitude, centroid longitude, and polygon area
Hint: If a polygon has 0 area, return its first position as its centroid
>>> p1, p2, p3 = make_position(1, 2), make_position(3, 4), make_position(5, 0)
>>> triangle = [p1, p2, p3, p1] # First vertex is also the last vertex
>>> find_centroid(triangle)
(3.0, 2.0, 6.0)
>>> find_centroid([p1, p3, p2, p1])
(3.0, 2.0, 6.0)
>>> find_centroid([p1, p2, p1])
(1, 2, 0)
"""
    # Degenerate "polygons" with at most two distinct vertices have zero area,
    # so their first position serves as the centroid.
    if len(polygon) <= 3:
        return (latitude(polygon[0]), longitude(polygon[0]), 0)
    area, clat, clon = 0, 0, 0
    for i in range(len(polygon) - 1):
        x0, y0 = latitude(polygon[i]), longitude(polygon[i])
        x1, y1 = latitude(polygon[i + 1]), longitude(polygon[i + 1])
        cross = x0 * y1 - x1 * y0  # shoelace term for this edge
        area += cross
        clat += (x0 + x1) * cross
        clon += (y0 + y1) * cross
    area = area / 2
    if area == 0:
        return (latitude(polygon[0]), longitude(polygon[0]), 0)
    return (clat / (6 * area), clon / (6 * area), abs(area))
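# A worked pass of the shoelace computation above, using the doctest triangle
# (1, 2), (3, 4), (5, 0) closed back to (1, 2):
#   2A   = (1*4 - 3*2) + (3*0 - 5*4) + (5*2 - 1*0) = -2 - 20 + 10 = -12, so A = -6
#   clat = [(1+3)*(-2) + (3+5)*(-20) + (5+1)*10] / (6 * -6) = -108 / -36 = 3.0
#   clon = [(2+4)*(-2) + (4+0)*(-20) + (0+2)*10] / (6 * -6) =  -72 / -36 = 2.0
# The signed area is negative because the vertices are listed clockwise under
# this (latitude, longitude) convention; find_centroid reports abs(area) = 6.0,
# matching the doctest result (3.0, 2.0, 6.0).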
def find_center(polygons):
"""Compute the geographic center of a state, averaged over its polygons.
The center is the average position of centroids of the polygons in polygons,
weighted by the area of those polygons.
Arguments:
polygons -- a list of polygons
>>> ca = find_center(us_states['CA']) # California
>>> round(latitude(ca), 5)
37.25389
>>> round(longitude(ca), 5)
-119.61439
>>> hi = find_center(us_states['HI']) # Hawaii
>>> round(latitude(hi), 5)
20.1489
>>> round(longitude(hi), 5)
-156.21763
"""
    # Weight each polygon's centroid by that polygon's area, then divide the
    # weighted sums by the total area.
    lat_total, lon_total, area_total = 0, 0, 0
    for polygon in polygons:
        clat, clon, area = find_centroid(polygon)
        lat_total += clat * area
        lon_total += clon * area
        area_total += area
    return make_position(lat_total / area_total, lon_total / area_total)
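# A sketch of the weighting used above: for polygons with centroids
# (lat_i, lon_i) and areas A_i, the state's center is
#   center_lat = sum(lat_i * A_i) / sum(A_i)
#   center_lon = sum(lon_i * A_i) / sum(A_i)
# so a state's largest polygon (a mainland or large island) contributes the
# most to where its center falls.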
# Phase 3: The Mood of the Nation
def find_closest_state(tweet, state_centers):
"""Return the name of the state closest to the given tweet's location.
Use the geo_distance function (already provided) to calculate distance
in miles between two latitude-longitude positions.
Arguments:
tweet -- a tweet abstract data type
state_centers -- a dictionary from state names to positions.
>>> us_centers = {n: find_center(s) for n, s in us_states.items()}
>>> sf = make_tweet("Welcome to San Francisco", None, 38, -122)
>>> ny = make_tweet("Welcome to New York", None, 41, -74)
>>> find_closest_state(sf, us_centers)
'CA'
>>> find_closest_state(ny, us_centers)
'NJ'
"""
    location = tweet_location(tweet)
    # The closest state is the one whose center is the minimum geo_distance
    # away from the tweet's location.
    return min(state_centers,
               key=lambda name: geo_distance(location, state_centers[name]))
def group_tweets_by_state(tweets):
"""Return a dictionary that aggregates tweets by their nearest state center.
The keys of the returned dictionary are state names, and the values are
lists of tweets that appear closer to that state center than any other.
tweets -- a sequence of tweet abstract data types
>>> sf = make_tweet("Welcome to San Francisco", None, 38, -122)
>>> ny = make_tweet("Welcome to New York", None, 41, -74)
>>> ca_tweets = group_tweets_by_state([sf, ny])['CA']
>>> tweet_string(ca_tweets[0])
'"Welcome to San Francisco" @ (38, -122)'
"""
    tweets_by_state = {}
    us_centers = {name: find_center(shapes) for name, shapes in us_states.items()}
    for tweet in tweets:
        state = find_closest_state(tweet, us_centers)
        tweets_by_state.setdefault(state, []).append(tweet)
    return tweets_by_state
def most_talkative_state(term):
"""Return the state that has the largest number of tweets containing term.
>>> most_talkative_state('texas')
'TX'
>>> most_talkative_state('sandwich')
'NJ'
"""
tweets = load_tweets(make_tweet, term)
    us_centers = {name: find_center(shapes) for name, shapes in us_states.items()}
    counts = {}
    for tweet in tweets:
        # Only tweets whose word list actually contains the term are counted.
        if term in tweet_words(tweet):
            state = find_closest_state(tweet, us_centers)
            counts[state] = counts.get(state, 0) + 1
    return max(counts, key=lambda state: counts[state])
def average_sentiments(tweets_by_state):
"""Calculate the average sentiment of the states by averaging over all
the tweets from each state. Return the result as a dictionary from state
names to average sentiment values (numbers).
If a state has no tweets with sentiment values, leave it out of the
dictionary entirely. Do NOT include states with no tweets, or with tweets
that have no sentiment, as 0. 0 represents neutral sentiment, not unknown
sentiment.
tweets_by_state -- A dictionary from state names to lists of tweets
"""
    averaged_state_sentiments = {}
    for state, tweets in tweets_by_state.items():
        total, with_sentiment = 0, 0
        for tweet in tweets:
            sentiment = analyze_tweet_sentiment(tweet)
            if has_sentiment(sentiment):
                total += sentiment_value(sentiment)
                with_sentiment += 1
        # States whose tweets carry no sentiment are omitted entirely.
        if with_sentiment > 0:
            averaged_state_sentiments[state] = total / with_sentiment
    return averaged_state_sentiments
# Phase 4: Into the Fourth Dimension
def group_tweets_by_hour(tweets):
"""Return a dictionary that groups tweets by the hour they were posted.
The keys of the returned dictionary are the integers 0 through 23.
The values are lists of tweets, where tweets_by_hour[i] is the list of all
tweets that were posted between hour i and hour i + 1. Hour 0 refers to
midnight, while hour 23 refers to 11:00PM.
To get started, read the Python Library documentation for datetime objects:
http://docs.python.org/py3k/library/datetime.html#datetime.datetime
tweets -- A list of tweets to be grouped
"""
    # Start every hour with an empty list so hours with no tweets still appear.
    tweets_by_hour = {hour: [] for hour in range(24)}
    for tweet in tweets:
        tweets_by_hour[tweet_time(tweet).hour].append(tweet)
    return tweets_by_hour
def print_sentiment(text='Are you virtuous or verminous?'):
"""Print the words in text, annotated by their sentiment scores."""
words = extract_words(text.lower())
assert words, 'No words extracted from "' + text + '"'
layout = '{0:>' + str(len(max(words, key=len))) + '}: {1:+}'
    for word in words:
s = get_word_sentiment(word)
if has_sentiment(s):
print(layout.format(word, sentiment_value(s)))
def draw_centered_map(center_state='TX', n=10):
"""Draw the n states closest to center_state."""
us_centers = {n: find_center(s) for n, s in us_states.items()}
center = us_centers[center_state.upper()]
dist_from_center = lambda name: geo_distance(center, us_centers[name])
for name in sorted(us_states.keys(), key=dist_from_center)[:int(n)]:
draw_state(us_states[name])
draw_name(name, us_centers[name])
draw_dot(center, 1, 10) # Mark the center state with a red dot
wait()
def draw_state_sentiments(state_sentiments={}):
"""Draw all U.S. states in colors corresponding to their sentiment value.
Unknown state names are ignored; states without values are colored grey.
state_sentiments -- A dictionary from state strings to sentiment values
"""
for name, shapes in us_states.items():
sentiment = state_sentiments.get(name, None)
draw_state(shapes, sentiment)
for name, shapes in us_states.items():
center = find_center(shapes)
if center is not None:
draw_name(name, center)
def draw_map_for_term(term='my job'):
"""Draw the sentiment map corresponding to the tweets that contain term.
Some term suggestions:
New York, Texas, sandwich, my life, justinbieber
"""
tweets = load_tweets(make_tweet, term)
tweets_by_state = group_tweets_by_state(tweets)
state_sentiments = average_sentiments(tweets_by_state)
draw_state_sentiments(state_sentiments)
for tweet in tweets:
s = analyze_tweet_sentiment(tweet)
if has_sentiment(s):
draw_dot(tweet_location(tweet), sentiment_value(s))
wait()
def draw_map_by_hour(term='my job', pause=0.5):
"""Draw the sentiment map for tweets that match term, for each hour."""
tweets = load_tweets(make_tweet, term)
tweets_by_hour = group_tweets_by_hour(tweets)
for hour in range(24):
current_tweets = tweets_by_hour.get(hour, [])
tweets_by_state = group_tweets_by_state(current_tweets)
state_sentiments = average_sentiments(tweets_by_state)
draw_state_sentiments(state_sentiments)
message("{0:02}:00-{0:02}:59".format(hour))
wait(pause)
def run_doctests(names):
"""Run verbose doctests for all functions in space-separated names."""
g = globals()
errors = []
for name in names.split():
if name not in g:
print("No function named " + name)
else:
if run_docstring_examples(g[name], g, True) is not None:
errors.append(name)
if len(errors) == 0:
print("Test passed.")
else:
print("Error(s) found in: " + ', '.join(errors))
@main
def run(*args):
"""Read command-line arguments and calls corresponding functions."""
import argparse
parser = argparse.ArgumentParser(description="Run Trends")
parser.add_argument('--print_sentiment', '-p', action='store_true')
parser.add_argument('--run_doctests', '-t', action='store_true')
parser.add_argument('--draw_centered_map', '-d', action='store_true')
parser.add_argument('--draw_map_for_term', '-m', action='store_true')
parser.add_argument('--draw_map_by_hour', '-b', action='store_true')
parser.add_argument('text', metavar='T', type=str, nargs='*',
help='Text to process')
args = parser.parse_args()
for name, execute in args.__dict__.items():
if name != 'text' and execute:
globals()[name](' '.join(args.text))
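# Example invocations (a sketch, assuming this module is run directly with
# Python 3 and that the course's data and map dependencies are available):
#   python3 trends.py -p "have a nice day"    # print sentiment scores for each word
#   python3 trends.py -d TX                   # draw the 10 states nearest Texas
#   python3 trends.py -m "my job"             # sentiment map for tweets matching a term
#   python3 trends.py -b "my job"             # one sentiment map per hour of the day
#   python3 trends.py -t "extract_words analyze_tweet_sentiment"  # run doctests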