sys.stdout = codecs.getwriter('utf8')(sys.stdout.buffer) sys.stderr = codecs.getwriter('utf8')(sys.stderr.buffer) except: # python 2 sys.stdout = codecs.getwriter('utf8')(sys.stdout) sys.stderr = codecs.getwriter('utf8')(sys.stderr) # SAVE YOUR APPLICATION CREDENTIALS IN TwitterAPI/credentials.txt. o = TwitterOAuth.read_file() api = TwitterAPI(o.consumer_key, o.consumer_secret, o.access_token_key, o.access_token_secret) # GET 20 TWEETS CONTAINING 'ZZZ' api.request('search/tweets', {'q':'zzz'}) iter = api.get_iterator() for item in iter: sys.stdout.write('%s\n' %item['text']) """ # POST A TWEET sys.stdout.write('%s\n' api.request('statuses/update', {'status':'This is another tweet!'})) # STREAM TWEETS FROM AROUND NYC api.request('statuses/filter', {'locations':'-74,40,-73,41'}) iter = api.get_iterator() for item in iter: sys.stdout.write('%s\n' %item['text']) # GET TWEETS FROM THE PAST WEEK OR SO CONTAINING 'LOVE' pager = TwitterRestPager(api, 'search/tweets', {'q':'love'});
raw.append(row)  # NOTE(review): tail of a row-reading loop whose header is outside this chunk
# First column of the raw rows holds the source user ids (as strings).
src_id = list(np.array(raw)[:, 0])

# Crawl friend and follower id lists for each source user, appending
# tab-separated edge lists to the "friends" and "followers" files.
with open(os.path.join(thisDir, "friends"), "a") as file1:
    with open(os.path.join(thisDir, "followers"), "a") as file2:
        for i in range(start, N):
            # Twitter rate-limits these endpoints per 15-minute window;
            # after 13 paired calls, sleep 15 minutes and reset the counter.
            if count > 13:
                print("sleeping")
                time.sleep(900)
                count = 1
            call.request("friends/ids", {"user_id": src_id[i],
                                         "count": "5000",
                                         "stringify_ids": "true"})
            friend = call.get_iterator()
            call.request("followers/ids", {"user_id": src_id[i],
                                           "count": "5000",
                                           "stringify_ids": "true"})
            follow = call.get_iterator()
            if "<Response [200]>" == str(call.response):
                # One line per user: source id, then its friend/follower ids.
                for item in friend:
                    target_ids = item["ids"]
                    file1.write(src_id[i] + "\t" + "\t".join(target_ids) + "\n")
                for item in follow:
                    target_ids = item["ids"]
                    file2.write(src_id[i] + "\t" + "\t".join(target_ids) + "\n")
                count += 1
            else:
                # Non-200 response: record a miss for this user.
                # (fixed: original omitted the newline, so the next record
                # would have been appended onto the same line)
                file1.write(src_id[i] + "\tmiss\n")
                file2.write(src_id[i] + "\tmiss\n")
call = TwitterAPI(consumer_key, consumer_secret, access_token_key, access_token_secret) print "0" #Contacts the twitter REST Api and asked for tweets satisfying the following criteria thisDir = os.path.dirname(os.path.realpath(__file__)) print "1" #finish_time = time.time() + 28800 while ('true'): # for 8 hours print "2" call.request('statuses/sample') #call.request('friends/ids', '{user_id = 392754790176129024, count = 250}') print "New Call" response = call.get_iterator() with open(os.path.join(thisDir, 'id_tweets.txt'), 'a') as afile: for item in response: try: lang = item[u'lang'].encode('utf-8') if (lang == 'en'): ident = item[u"user"][u'id_str'].encode('utf-8') tweet = item[u'text'].encode('utf-8') tweet = " ".join(tweet.split( )) #Removes the excess whitespace from the tweet afile.write(ident + '\t' + tweet + '\n') #Writes the id and tweet to a file print ident + '\t' + tweet + '\n' elif ('warning' in item):
# python 3 sys.stdout = codecs.getwriter('utf8')(sys.stdout.buffer) sys.stderr = codecs.getwriter('utf8')(sys.stderr.buffer) except: # python 2 sys.stdout = codecs.getwriter('utf8')(sys.stdout) sys.stderr = codecs.getwriter('utf8')(sys.stderr) # SAVE YOUR APPLICATION CREDENTIALS IN TwitterAPI/credentials.txt. o = TwitterOAuth.read_file() api = TwitterAPI(o.consumer_key, o.consumer_secret, o.access_token_key, o.access_token_secret) # GET 20 TWEETS CONTAINING 'ZZZ' api.request('search/tweets', {'q': 'zzz'}) iter = api.get_iterator() for item in iter: sys.stdout.write('%s\n' % item['text']) """ # POST A TWEET sys.stdout.write('%s\n' api.request('statuses/update', {'status':'This is another tweet!'})) # STREAM TWEETS FROM AROUND NYC api.request('statuses/filter', {'locations':'-74,40,-73,41'}) iter = api.get_iterator() for item in iter: sys.stdout.write('%s\n' %item['text']) # GET TWEETS FROM THE PAST WEEK OR SO CONTAINING 'LOVE' pager = TwitterRestPager(api, 'search/tweets', {'q':'love'}); iter = pager.get_iterator()
# Crawl friend and follower id lists for each source user, appending
# tab-separated edge lists to the "friends" and "followers" files.
with open(os.path.join(thisDir, "friends"), 'a') as file1:
    with open(os.path.join(thisDir, "followers"), 'a') as file2:
        for i in range(start, N):
            # Twitter rate-limits these endpoints per 15-minute window;
            # after 13 paired calls, sleep 15 minutes and reset the counter.
            if count > 13:
                print("sleeping")
                time.sleep(900)
                count = 1
            call.request('friends/ids', {
                'user_id': src_id[i],
                'count': "5000",
                'stringify_ids': 'true'
            })
            friend = call.get_iterator()
            call.request('followers/ids', {
                'user_id': src_id[i],
                'count': "5000",
                'stringify_ids': 'true'
            })
            follow = call.get_iterator()
            if '<Response [200]>' == str(call.response):
                # One line per user: source id, then its friend/follower ids.
                for item in friend:
                    target_ids = item["ids"]
                    file1.write(src_id[i] + "\t" + "\t".join(target_ids) + '\n')
                for item in follow:
                    target_ids = item["ids"]
                    file2.write(src_id[i] + "\t" + "\t".join(target_ids) + '\n')
                # NOTE(review): chunk is cut here — the count increment and the
                # non-200 branch continue past this chunk.
access_token_key = "1587711043-x8cU7WbWjPBBXuwykoMgNwnpU6sO76BeBtHh7Aj" call = TwitterAPI(consumer_key, consumer_secret, access_token_key, access_token_secret) print "0" #Contacts the twitter REST Api and asked for tweets satisfying the following criteria thisDir = os.path.dirname(os.path.realpath(__file__)) print "1" #finish_time = time.time() + 28800 while('true'): # for 8 hours print "2" call.request('statuses/sample') #call.request('friends/ids', '{user_id = 392754790176129024, count = 250}') print "New Call" response = call.get_iterator() with open(os.path.join(thisDir, 'id_tweets.txt'), 'a') as afile: for item in response: try: lang = item[u'lang'].encode('utf-8') if(lang == 'en'): ident = item[u"user"][u'id_str'].encode('utf-8') tweet = item[u'text'].encode('utf-8') tweet = " ".join(tweet.split()) #Removes the excess whitespace from the tweet afile.write(ident +'\t' + tweet + '\n') #Writes the id and tweet to a file print ident +'\t'+ tweet + '\n' elif('warning' in item): print item