def get_tweets_by_GPS(location, text_only=False):
    """Search recent tweets near *location* (a Twitter ``geocode`` string).

    Returns a list of dicts, each with a ``text`` key (stripped of mentions,
    URLs and punctuation when *text_only* is true) and, when present, a
    ``profile_image`` key.
    """
    api = TwitterAPI(
        Config.CONSUMER_KEY,
        Config.CONSUMER_SECRET,
        Config.ACCESS_TOKEN_KEY,
        Config.ACCESS_TOKEN_SECRET
    )
    response = api.request('search/tweets', {'geocode': location})
    tweets = []
    for item in response.get_iterator():
        data = {}
        if "text" in item:
            if text_only:
                # Drop @mentions, URLs and non-alphanumeric noise, then
                # collapse runs of whitespace.  FIX: raw string — the old
                # literal relied on invalid escapes like '\w' and '\S'.
                data['text'] = ' '.join(
                    re.sub(r'(@[A-Za-z0-9]+)|([^0-9A-Za-z \t])|(\w+://\S+)',
                           " ", item['text']).split())
            else:
                data['text'] = item['text']
        # NOTE(review): search results usually carry the avatar under
        # item['user'], not at the top level — confirm these keys appear.
        if 'profile_image_url' in item:
            data['profile_image'] = item['profile_image_url']
        elif 'profile_image_url_https' in item:
            data['profile_image'] = item['profile_image_url_https']
        tweets.append(data)
    return tweets
def share_yap_on_twitter(user, yap, twitter_access_token_key, twitter_access_token_secret):
    """Tweet *yap* on the owner's behalf, attaching its picture when flagged.

    Returns the new tweet's id on success, otherwise ``None``.
    """
    twitter = TwitterAPI(
        consumer_key=settings.TWITTER_CONSUMER_KEY,
        consumer_secret=settings.TWITTER_CONSUMER_SECRET,
        access_token_key=twitter_access_token_key,
        access_token_secret=twitter_access_token_secret)
    status = "@" + str(yap.user.username) + " yapped \"" + str(yap.title) + '\" - web.yapster.co/yap/' + str(yap.pk)
    length_of_status = len(status)
    if yap.picture_flag == True:
        # Media tweets lose characters to the attachment URL (~118 usable).
        if length_of_status > 118:
            extra_length_of_title = length_of_status - 121
            title = yap.title[:(-extra_length_of_title)].upper() + "..."  # -3 for three dots ...
        elif length_of_status <= 118:
            title = yap.title
        # NOTE(review): `title` is computed but never folded back into
        # `status` — the untruncated status is what actually gets posted.
        b = connect_s3(bucket_name="yapsterapp")
        yap_picture_key = b.get_key(yap.picture_path)
        fp = StringIO.StringIO()
        yap_picture_key.get_file(fp)
        yap_picture = fp.getvalue()
        r = twitter.request('statuses/update_with_media',
                            {'status': status}, {'media[]': yap_picture})
    elif yap.picture_flag == False:
        if length_of_status > 140:
            extra_length_of_title = length_of_status - 143
            title = yap.title[:(-extra_length_of_title)].upper() + "..."
        else:
            # BUG FIX: this branch was `elif length_of_status > 140`, a
            # duplicate of the condition above, so it could never run and
            # `title` was left unset for short statuses.
            title = yap.title
        r = twitter.request('statuses/update', {'status': status})
    if r.status_code == 200:
        return json.loads(r.text)['id']
    else:
        pass
def sendToTwitter(blurb, imgName):
    """Tweet *blurb*, attaching the image file *imgName* when one is given.

    Falls back to a text-only tweet if the media upload fails twice.
    Returns the HTTP status code of the last request made.
    """
    # These tokens are necessary for twitter contact and must be obtained by
    # creating an app on app.twitter.com, linking to an account, and setting
    # perms to read/write
    accToken = "XXXXX"
    accTokenSecret = "XXXXX"
    apiKey = "XXXXX"
    apiSecret = "XXXXX"
    api = TwitterAPI(apiKey, apiSecret, accToken, accTokenSecret)
    if (imgName != "" and imgName != "fail"):
        # BUG FIX: read the image once.  The old code called read() again
        # for the retry, which returned b'' (file pointer at EOF), and it
        # never closed the handle.  Also `is not 200` compared identity,
        # not equality — use `!=`.
        with open(imgName, "rb") as open_file:
            image_bytes = open_file.read()
        r = api.request("statuses/update_with_media",
                        {"status": blurb}, {"media[]": image_bytes})
        if r.status_code != 200:
            # Try once more, truncating period
            r = api.request("statuses/update_with_media",
                            {"status": blurb[:len(blurb)-1]},
                            {"media[]": image_bytes})
            if r.status_code != 200:
                print("Image tweet failed (status code " + str(r.status_code) + "). Trying tweet without image...")
                r = api.request("statuses/update", {"status": blurb})
    else:
        r = api.request("statuses/update", {"status": blurb})
    return r.status_code
def popular_tweets(tweeter):
    """Sync *tweeter*'s timeline into the DB, then return oEmbed HTML for
    the 20 tweets with the highest retweet+favorite total."""
    api = TwitterAPI(os.getenv("tw_consumer_key"),
                     os.getenv("tw_consumer_secret"),
                     auth_type='oAuth2')
    timeline = api.request('statuses/user_timeline', {'screen_name': tweeter})
    # Upsert every fetched tweet, keyed by its Twitter id.
    for tweet in timeline:
        Tweet.objects.update_or_create(
            twt_id=tweet['id'],
            defaults={
                'username': tweet['user']['screen_name'],
                'created_at': tweet['created_at'],
                'text': tweet['text'],
                'retweet_count': tweet['retweet_count'],
                'favorite_count': tweet['favorite_count'],
                'popular': (tweet['retweet_count'] + tweet['favorite_count']),
            })
    top_records = Tweet.objects.filter(username=tweeter).order_by('-popular')[:20]
    # Resolve each stored id to its embeddable HTML snippet.
    embeds = []
    for record in top_records:
        url = "https://api.twitter.com/1.1/statuses/oembed.json?id={}".format(record.twt_id)
        embeds.append(requests.get(url).json()["html"])
    return embeds
def get_twitter_result(search):
    """Return up to three tweets matching progressively looser variants of *search*.

    Each result is ``[screen_name, profile_image_url, text]``.  Any API
    failure yields an empty list.
    """
    CONSUMER_KEY = 'SECRET'
    CONSUMER_SECRET = 'SECRET'
    ACCESS_TOKEN_KEY = 'SECRET'
    ACCESS_TOKEN_SECRET = 'SECRET'
    api = TwitterAPI(CONSUMER_KEY, CONSUMER_SECRET,
                     ACCESS_TOKEN_KEY, ACCESS_TOKEN_SECRET)
    # Fall-back query patterns, tried in order until enough results arrive.
    search_pattern = [search.replace(" ", ""), search, search.split()[0],
                      search[1:], '@' + search.replace(" ", "")[1:]]
    twitter_result = []
    MAX_TWEET = 3
    count = MAX_TWEET
    try:
        for pattern in search_pattern:
            r = api.request('search/tweets',
                            {'q': pattern, 'count': count,
                             'result_type': 'mixed', 'lang': 'en'})
            for item in r.get_iterator():
                twitter_result.append([item['user']['screen_name'],
                                       item['user']['profile_image_url'],
                                       item['text']])
            if len(twitter_result) > MAX_TWEET - 1:
                break
            else:
                # Only ask for as many as are still missing.
                count = MAX_TWEET - len(twitter_result)
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt / SystemExit.
        twitter_result = []
    return twitter_result
def main():
    """Backfill up to 100 recent tweets for every Tweeple not yet filled."""
    db.connect()
    db.create_table(Rows, safe=True)
    # NOTE(review): credentials are hard-coded; they belong in config/env.
    api = TwitterAPI('65dxiPhn10SE3zJT6fVEPWsVx',
                     'VmK0rQFapjymwtSNpidi0Yfe16mjMdHXBhZTmYVc8dwb1joAxX',
                     '109566527-ZufkixJ3XInW91ZE34hGFtxQcrXGOzBS7vBdApUP',
                     '0N5poNnJoDsWO8Yvf1FfNECfOJKJm7nKthPVzow7apyPu')
    user_ids = [x.twi_id for x in
                Tweeple.select().where(Tweeple.filled_tweets == 0)]
    counter = 0       # requests made in the current rate-limit window
    bad_counter = 0   # tweets that arrived without an 'id' field
    # FIX: renamed from `iter`, which shadowed the builtin.
    remaining = len(user_ids)
    print(remaining)
    sleep(15*60)  # start with a fresh rate-limit window
    for twi_id in user_ids:
        r = api.request('statuses/user_timeline',
                        {'user_id': twi_id, 'count': '100'})
        with db.transaction():
            user = Tweeple.get(twi_id=twi_id)
            for tweet in r:
                tweet['user'] = user
                # IDIOM FIX: membership test directly on the dict,
                # not on .keys().
                if 'id' not in tweet:
                    remaining -= 1
                    counter += 1
                    bad_counter += 1
                    print(remaining, 'Bad')
                    continue
                del tweet['id']
                Rows.create(**tweet)
            user.filled_tweets = 1
            user.save()
        counter += 1
        if counter > 290:
            # Stay under the ~300 requests / 15 min limit.
            sleep(15*60)
            counter = 0
        remaining -= 1
        print(remaining)
    print(bad_counter)
def queryTwitterApi(request):
    '''
    Query the Twitter API through the TwitterAPI python wrapper lib and
    return the Twitter API response in JSON format.

    IMPORTANT: application-only authentication is required for accessing
    the Twitter API — a Twitter app must be created with Twitter and the
    corresponding consumer key and secret generated; ensure key and secret
    are stored in auth.py.

    Handles a GET request with an 'action' parameter for the Twitter API
    type (such as 'user/show'), a 'queryKey' (such as 'screen_name' for a
    'user/show' query) and a 'queryVal' holding the actual query value.
    '''
    global api
    # Lazily initialise the module-level TwitterAPI object with
    # application-only (oAuth2) credentials on first use.
    if api == '':
        api = TwitterAPI(CONSUMER_KEY, CONSUMER_SECRET, auth_type='oAuth2')
    action = request.GET['action']
    queryKey = request.GET['queryKey']
    queryVal = request.GET['queryVal']
    resp = api.request(action, {queryKey: queryVal})
    return HttpResponse(resp.text)
class TwitterClient(object):
    """This class is used for the communication with the Twitter API"""

    def __init__(self, options):
        # `options` is a mapping holding the four OAuth1 credential strings.
        self.api = TwitterAPI(
            options['consumer_key'],
            options['consumer_secret'],
            options['access_token_key'],
            options['access_token_secret'])

    def _sleep(self):
        # Crude rate limiting: pause 10 s between API interactions.
        sleep(10)

    def get_followers(self):
        """Return the follower ids of the authenticated account."""
        followers = []
        # NOTE(review): indentation reconstructed — _sleep() may originally
        # have been inside the loop (one pause per id); confirm.
        for id in self.api.request('followers/ids'):
            followers.append(id)
        self._sleep()
        return followers

    def get_following(self):
        """Return the ids of the accounts the authenticated account follows."""
        following = []
        for id in self.api.request('friends/ids'):
            following.append(id)
        self._sleep()
        return following

    def follow_user(self, user_id):
        """Follow *user_id*; prints the screen name on success."""
        r = self.api.request('friendships/create', {'user_id': user_id})
        if r.status_code == 200:
            status = r.json()
            print 'followed %s' % status['screen_name']
        self._sleep()

    def unfollow_user(self, user_id):
        """Unfollow *user_id*; prints the screen name on success."""
        r = self.api.request('friendships/destroy', {'user_id': user_id})
        if r.status_code == 200:
            status = r.json()
            print 'unfollowed %s' % status['screen_name']
        self._sleep()

    def get_users_for_topic(self, topic):
        """Search German-language tweets for *topic* and return the distinct authors."""
        r = self.api.request('search/tweets',
                             {'count': 200, 'q': topic, 'lang': "de"})
        statuses = r.json()['statuses']
        how_many = len(statuses)
        print("fetched " + str(how_many) + " tweets")
        self._sleep()
        users = map(lambda s: s['user'], statuses)
        # De-duplicate authors by id_str while preserving first-seen order.
        users_id_unique = set()
        distinct = []
        for user in users:
            if(user['id_str'] not in users_id_unique):
                distinct.append(user)
                users_id_unique.add(user['id_str'])
        return distinct
def get_happy_sad_count(location):
    """
    Gets the happy and sad count on the current query

    Returns the happy count, the sad count, and one randomly sampled
    (text, "positive"|"negative") tweet — or ``None`` for the tweet when
    both searches came back empty.
    """
    api = TwitterAPI('SSuspEaugDP7vcITlMA',
                     '6FdUGOTcqlcoufjz62frkKHJ5cgm1vnaIHqjlboOHg',
                     '78925893-xs57TGEPdYpSmbfBKi1XPKVliu5i2TrDRFOEPwVmt',
                     'fAVXZr4cnEi2hNtVvLyh68mxh4JysMbyJVlBbefhLvf5v')
    happy_count = 0
    sad_count = 0
    happy_query = {'q': ':)', 'lang': 'en', 'count': '100', 'result_type': 'recent'}
    sad_query = {'q': ':(', 'lang': 'en', 'count': '100', 'result_type': 'recent'}
    if location:
        # NOTE(review): 'locations' is a Streaming-API parameter; REST
        # search expects 'geocode' — confirm this filter ever takes effect.
        happy_query['locations'] = '{0}, {1}'.format(location[0], location[1])
        sad_query['locations'] = '{0}, {1}'.format(location[0], location[1])
    happy = api.request('search/tweets', happy_query)
    sad = api.request('search/tweets', sad_query)
    h_list = []
    s_list = []
    for item in happy:
        happy_count += 1
        h_list.append(item)
    for item in sad:
        sad_count += 1
        s_list.append(item)
    # BUG FIX: guard against empty result sets — the old code indexed
    # h_list unconditionally and raised IndexError when nothing matched.
    tweet = None
    if h_list:
        tweet = (h_list[int(random()*len(h_list))]['text'], "positive")
    # 50/50 chance of reporting a sad sample instead (always, if no happy).
    if s_list and (tweet is None or random()*2 < 1):
        tweet = (s_list[int(random()*len(s_list))]['text'], "negative")
    return happy_count, sad_count, tweet
def search_twitter_by_term(term, geo_search=False, lat="", lng=""):
    """Search recent English tweets for *term* and return TwitterItem objects.

    When *geo_search* is true and both coordinates are given, results are
    restricted to a 100-mile radius around (*lat*, *lng*).

    Raises:
        TwitterAPIQueryError: on any failure while querying or parsing.
    """
    consumer_key = TWITTER_API_KEYS['consumer_key']
    consumer_secret = TWITTER_API_KEYS['consumer_secret']
    access_token_key = TWITTER_API_KEYS['access_token_key']
    access_token_secret = TWITTER_API_KEYS['access_token_secret']
    api = TwitterAPI(consumer_key, consumer_secret,
                     access_token_key, access_token_secret)
    lang = 'en'
    count = 10
    try:
        items = []
        options = {'q': term, 'lang': lang, 'count': count}
        if geo_search and lat and lng:
            options['geocode'] = lat + ',' + lng + ',100mi'
        r = list(api.request('search/tweets', options))
        for item in r:
            if 'text' in item and 'created_at' in item and 'user' in item:
                text = process_twitter_text(item['text'])
                created_at = format_date(item['created_at'])
                user_name = item['user']['name']
                user_screen_name = item['user']['screen_name']
                profile_url = item['user']['profile_image_url']
                item = TwitterItem(text, created_at, user_name,
                                   user_screen_name, profile_url)
                items.append(item)
    except Exception as exc:
        # BUG FIX: was a bare `except:` that also caught SystemExit and
        # discarded the original traceback; chain it for debuggability.
        raise TwitterAPIQueryError from exc
    return items
def tweet_movie(fname, which_fun):
    """Chunk-upload the mp4 at *fname* and tweet it with *which_fun* as status.

    Uses Twitter's three-phase INIT/APPEND/FINALIZE media upload, sending
    the file in 4 MB segments.  Returns the final statuses/update response.
    """
    total_bytes = os.path.getsize(fname)
    api = TwitterAPI(t_keys['CONSUMER_KEY'], t_keys['CONSUMER_SECRET'],
                     t_keys['ACCESS_KEY'], t_keys['ACCESS_SECRET'])
    r = api.request('media/upload', {'command': 'INIT',
                                     'media_type': 'video/mp4',
                                     'total_bytes': total_bytes})
    check_status(r)
    media_id = r.json()['media_id']
    segment_id = 0
    bytes_sent = 0
    # BUG FIX: the file handle was never closed; `with` guarantees cleanup
    # even when check_status() raises mid-upload.
    with open(fname, 'rb') as file:
        while bytes_sent < total_bytes:
            chunk = file.read(4*1024*1024)
            r = api.request('media/upload', {'command': 'APPEND',
                                             'media_id': media_id,
                                             'segment_index': segment_id},
                            {'media': chunk})
            check_status(r)
            segment_id = segment_id + 1
            bytes_sent = file.tell()
            print('[' + str(total_bytes) + ']', str(bytes_sent))
    r = api.request('media/upload', {'command': 'FINALIZE',
                                     'media_id': media_id})
    check_status(r)
    r = api.request('statuses/update', {'status': which_fun,
                                        'media_ids': media_id})
    check_status(r)
    print("Tweeted about " + which_fun)
    return(r)
def loadTweets(request):
    """Fetch geo-tagged 'trump' tweets, score them, and store new ones per US state.

    Returns an HttpResponse reporting how many tweets were added.
    """
    # Credentials live in files/access.json two levels above this module.
    path = (os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
            + "\\files\\access.json").replace('\\', '/')
    with open(path) as f:
        data = json.load(f)
    ACCESS_TOKEN_KEY = data['ACCESS_TOKEN_KEY']
    ACCESS_TOKEN_SECRET = data['ACCESS_TOKEN_SECRET']
    CONSUMER_KEY = data['CONSUMER_KEY']
    CONSUMER_SECRET = data['CONSUMER_SECRET']
    api = TwitterAPI(CONSUMER_KEY, CONSUMER_SECRET,
                     ACCESS_TOKEN_KEY, ACCESS_TOKEN_SECRET)
    SEARCH_TERM = 'trump'
    count = 0
    lang = 'en'
    # Circle roughly covering the continental US.
    geocode = "37.6,-95.665,1400mi"
    r = api.request('search/tweets', {'lang': lang, 'q': SEARCH_TERM,
                                      'count': count, 'geocode': geocode})
    for item in r:
        # IDIOM FIX: compare to None with `is not`, not `!=`.
        if item['place'] is not None:
            text = unicode(item['text']).encode('utf-8')
            # The last two characters of full_name are the state abbreviation.
            state = unicode(item['place']['full_name'][-2:]).encode("utf-8")
            score = getScore(text)
            if score > -2:
                queryState = State.objects.filter(abbrev=state)
                queryTweet = Tweet.objects.filter(text=text)
                # Check if tweet not already in the Database & if state exists
                if queryState.count() > 0 and queryTweet.count() == 0:
                    state = queryState[0]
                    _, created = Tweet.objects.get_or_create(
                        text=text, state=state, score=score,)
                    count += 1
    return HttpResponse("Load successful " + str(count)
                        + " tweets has been added to the DB")
def search():
    """Search for tweets mentioning the bot, reply to unseen ones, and log
    their ids to *idfile* so each tweet is handled only once."""
    global api
    api = TwitterAPI(consumer_key, consumer_secret,
                     access_token_key, access_token_secret)
    r = api.request('search/tweets', {'q':'hate speech twitter bot','count':10})
    for t in r:
        tweet_id = str(t[u'id'])
        #print tweet_id
        # Make sure the id log exists before memory-mapping it.
        try:
            f = open(idfile)
        except IOError:
            with open(idfile,'w') as f:
                print "Creating file: " + idfile
                f.write("tweet id" + '\n')
            f = open(idfile)
        # mmap the whole file so a substring search covers every stored id.
        s = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
        if s.find(tweet_id) != -1:
            print '= FOUND = ' + tweet_id
            f.close()
        else:
            print '= NEW TWEET = ' + tweet_id
            f.close()
            # Record the id, then reply and echo the tweet details.
            # NOTE(review): indentation reconstructed — confirm response()
            # and the prints below belong to this else-branch.
            with open(idfile,'a') as f:
                print "Writing tweet_id to file..."
                f.write(tweet_id + '\n')
            response(t)
            print '\t'+t['user']['name']+" :: "+t['user']['screen_name']
            print '\t'+t[u'text']
            createURL(t)
            print
def check_stream(self):
    """Continuously consume the Twitter location-filter stream, re-opening
    the connection every REFRESH_INTERVAL seconds."""
    api = TwitterAPI(consumer_key=config.get("twitter.consumer_key"),
                     consumer_secret=config.get("twitter.consumer_secret"),
                     access_token_key=config.get("twitter.access_token"),
                     access_token_secret=config.get("twitter.access_token_secret")
                     )
    while True:
        # Bounding boxes covering US west-coast and south/east regions.
        tweeter_stream = api.request(
            'statuses/filter',
            {'locations': "-123.66,32.54,-113.77,39.57,-93.82,24.32,-65.08,47.84"})
        # tweeter_stream = api.request('statuses/filter', {'locations': self.get_geo_str()})
        deadline = time.time() + self.REFRESH_INTERVAL
        # Stream data until the deadline, then reconnect so new Geo
        # Coordinates could be queried.
        for tweet in tweeter_stream:
            if time.time() > deadline:
                print("breaktime")
                break
            # Publish tweets to Kafka
            print(tweet)
def follower_crawler(name, key):
    """Worker: drain team names from the global queue ``q`` and crawl each
    team's complete follower-id list.

    Results go into the shared ``follower_lists`` dict (guarded by ``lock``)
    and are mirrored to ../followers/<team>.txt.  ``name`` labels this
    worker in log output; ``key`` holds the TwitterAPI credentials.
    """
    api = TwitterAPI(**key)
    while not q.empty():
        team = q.get()
        ids = set()
        resps = list()
        cursor = -1
        # Page through followers/ids until Twitter returns cursor 0.
        while cursor != 0:
            request = api.request('followers/ids', {'screen_name': team,
                                                    'count': 5000,
                                                    'cursor': cursor})
            print '{name} [{code}]: {team} - {cursor}'.format(team = team, cursor = cursor, name = name, code = request.status_code)
            if request.status_code != 429:
                # The response iterator yields a single page object.
                resp = [i for i in request][0]
                resps.append(resp)
                cursor = resp['next_cursor']
                ids = ids.union(set(resp['ids']))
            # Out of quota: sleep through the 15-minute rate-limit window.
            if int(request.headers['x-rate-limit-remaining']) == 0:
                print '{name} sleeping'.format(name = name)
                time.sleep(15*60)
        with lock:
            follower_lists[team] = {'ids': ids, 'responses': resps}
        with open('../followers/{}.txt'.format(team), 'w') as f:
            f.writelines([str(id)+'\n' for id in ids])
        q.task_done()
def mineTweets(username):
    """Return (text, created_at) pairs from *username*'s recent timeline."""
    api = TwitterAPI(CONSUMER_KEY, CONSUMER_SECRET,
                     ACCESS_TOKEN_KEY, ACCESS_TOKEN_SECRET)
    timeline = api.request('statuses/user_timeline', {'screen_name': username})
    return [(entry['text'], entry['created_at']) for entry in timeline]
def twitter(request, search):
    """Search tweets for *search* and return a hand-built JSON array of
    per-tweet summary objects as an HttpResponse."""
    consumer_key = ''
    consumer_secret = ''
    access_token_key = ''
    access_token_secret = ''
    api = TwitterAPI(consumer_key, consumer_secret,
                     access_token_key, access_token_secret)
    r = api.request('search/tweets', {'q': str(search)})
    data = json.loads(r.text)
    tweetInfo = '['
    for i in range(0, len(data['statuses'])):
        status = data['statuses'][i]
        tweetInfo += '{'
        tweetInfo += '"id":"'+status['id_str']+'",'
        tweetInfo += '"avatar":"'+status['user']['profile_image_url']+'",'
        tweetInfo += '"tweetedOn":"'+status['created_at']+'",'
        tweetInfo += ('"tweetedOnParsed":"'
                      + datetime.datetime.strptime(
                          status['created_at'],
                          "%a %b %d %X %z %Y").strftime("%X") + '",')
        tweetInfo += '"screenname":"'+status['user']['screen_name']+'",'
        # Sanitise quotes/newlines so the value stays valid inside JSON.
        tweetInfo += ('"status":"'
                      + status['text'].replace('"', "'").replace('\n', '').replace('\r', '')
                      + '",')
        tweetInfo += '"truncated":"'+str(status['truncated'])+'",'
        # BUG FIX: was data['statuses'][1]['retweeted'], so every row
        # reported the second tweet's retweeted flag.
        tweetInfo += '"retweeted":"'+str(status['retweeted'])+'"'
        tweetInfo += '}'
        if i+1 < len(data['statuses']):
            tweetInfo += ','
    tweetInfo += ']'
    return HttpResponse(tweetInfo)
def suck(save_item, handle_error, source):
    """Pull new statuses from each configured Twitter list and persist them
    via *save_item*, tracking per-owner high-water marks in *source*."""
    api = TwitterAPI(settings.TWITTER['consumer_key'],
                     settings.TWITTER['consumer_secret'],
                     settings.TWITTER['access_token'],
                     settings.TWITTER['access_token_secret'])
    # Mutates the same dict stored in source['lastRetrieved'].
    last_retrieved = source.setdefault('lastRetrieved', {})
    for l in lists.items:
        owner = l['owner_screen_name']
        request_filters = {
            'slug': l['slug'],
            'owner_screen_name': owner,
            'per_page': 100
        }
        if owner in last_retrieved:
            request_filters['since_id'] = last_retrieved[owner]
        r = api.request('lists/statuses', request_filters)
        new_since_id = None
        if r.status_code == 200:
            for record in r.get_iterator():
                # The first record is the newest; remember it as the
                # high-water mark for the next run.
                if not new_since_id:
                    new_since_id = record['id_str']
                last_retrieved[owner] = new_since_id
                save_item(transform(record))
    return last_retrieved
def execute(PLAYER_ONE, PLAYER_TWO): api = TwitterAPI(CONSUMER_KEY, CONSUMER_SECRET, ACCESS_TOKEN_KEY, ACCESS_TOKEN_SECRET) count = 30 lang = "en" # List of all the tweets collection1 = [] collection2 = [] followerCount1 = [] followerCount2 = [] r1 = api.request("search/tweets", {"lang": lang, "q": PLAYER_ONE, "count": count, "since": last_sunday}) for item in r1: ID = item["user"]["id"] followCount = getFollowerCount(ID) followerCount1.append(followCount) collection1.append(item["text"]) print print r2 = api.request("search/tweets", {"lang": lang, "q": PLAYER_TWO, "count": count, "since": last_sunday}) for item in r2: ID = item["user"]["id"] followCount = getFollowerCount(ID) followerCount2.append(followCount) collection2.append(item["text"]) score1 = calculate_sentiment(collection1, followerCount1) score2 = calculate_sentiment(collection2, followerCount2) print score1 print score2 if score1 < 0 and score2 > 0: return write_json(PLAYER_ONE, PLAYER_TWO, 0, 100) elif score1 > 0 and score2 < 0: return write_json(PLAYER_ONE, PLAYER_TWO, 100, 0) combinedScore = score1 + score2 print float(score1) / combinedScore return write_json(PLAYER_ONE, PLAYER_TWO, float(score1) / combinedScore * 100, float(score2) / combinedScore * 100)
class TwitterNotificationService(BaseNotificationService):
    """Implementation of a notification service for the Twitter service."""

    def __init__(self, consumer_key, consumer_secret, access_token_key,
                 access_token_secret, username):
        """Initialize the service."""
        from TwitterAPI import TwitterAPI
        self.user = username
        self.api = TwitterAPI(consumer_key, consumer_secret,
                              access_token_key, access_token_secret)

    def send_message(self, message="", **kwargs):
        """Tweet a message."""
        # With a target user configured, deliver as a direct message;
        # otherwise post a regular status update.
        if self.user:
            resp = self.api.request('direct_messages/new',
                                    {'text': message, 'user': self.user})
        else:
            resp = self.api.request('statuses/update', {'status': message})
        if resp.status_code == 200:
            return
        # Log the first error Twitter reported.
        import json
        obj = json.loads(resp.text)
        error_message = obj['errors'][0]['message']
        error_code = obj['errors'][0]['code']
        _LOGGER.error("Error %s : %s (Code %s)", resp.status_code,
                      error_message, error_code)
def post_to_twitter(sender, instance, *args, **kwargs):
    """
    Post new saved objects to Twitter.

    Signal handler: skipped entirely in DEBUG mode or when the instance
    opted out of tweeting; attaches the instance's image when present.
    """
    if settings.DEBUG == True:
        return
    if instance.twitter == False:
        # Instance explicitly opted out of tweeting.
        return
    data = {"status": str(get_body_twitte(instance.get_twitter_message()))}
    media = {}
    tw_url = 'statuses/update'
    image = instance.image
    if image:
        # BUG FIX: the image file handle was never closed; the context
        # manager releases it even if the API call below fails.
        with open(os.path.join(settings.MEDIA_ROOT, image.name), 'rb') as file:
            media.update({'media[]': file.read()})
        tw_url = 'statuses/update_with_media'
    consumer_key = settings.TWITTER_CONSUMER_KEY
    consumer_secret = settings.TWITTER_CONSUMER_SECRET
    access_token_key = settings.TWITTER_ACCESS_TOKEN_KEY
    access_token_secret = settings.TWITTER_ACCESS_TOKEN_SECRET
    api = TwitterAPI(
        consumer_key,
        consumer_secret,
        access_token_key,
        access_token_secret)
    r = api.request(tw_url, data, media)
def run_async(self): auth = TwitterOAuth.read_file_object(self.wrapper.getFileObject(__file__, "TwitterAPI/credentials.txt")) api = TwitterAPI(auth.consumer_key, auth.consumer_secret, auth.access_token_key, auth.access_token_secret) # list of user id to follow (by Nazli) """ '@StiftenAGF','@jpaarhus','#Aarhus','@AarhusKultur','@smagaarhus','@AarhusNyheder','@AarhusPortaldk', '@Aarhus2017','@OpenDataAarhus', '@aarhusupdate','@AskVest','@SundhedOgOmsorg','@ArhusVejr','@aarhus_ints', '@AGFFANdk','@AarhusCykelby','@VisitAarhus','@larshaahr' """ users = ['3601200017', '3370065087', '3330970365', '2911388001', '2706568231', '2647614595', '2201213730', '1324132976', '1065597596', '816850003', '763614246', '210987829', '159110346', '112585923', '77749562', '38227253', '36040150'] # list of terms to track tracks = ['#Aarhus'] print "TAA ready." try: r = api.request('statuses/filter', {'locations': self.location, 'follow': ','.join(users), 'track': ','.join(tracks)}) except: print r.text for item in r: self.tweet = item # print item self.wrapper.update() if self.abort.is_set(): break
def get_photo_stream(): api = TwitterAPI( os.environ['consumer_key'], os.environ['consumer_secret'], os.environ['access_token_key'], os.environ['access_token_secret'] ) # Any geo-tagged tweet, don't have access to firehose :( r = api.request('statuses/filter', {'locations': '-180,-90,180,90'}) for item in r.get_iterator(): try: if 'media_url' in item['entities']['media'][0] and item['entities']['media'][0]['type'] == "photo": media_url = item['entities']['media'][0]['media_url'] content = urllib.urlopen(media_url).read() img_array = np.asarray(bytearray(content), dtype=np.uint8) img = cv2.imdecode(img_array, 0) if img is not None and img.shape[::-1] in [(576, 1024), (600, 900)]: # Compressed iPhone 4/5 sizes if is_snap(img): print media_url except KeyError: continue
def fetch(self):
    """Page backwards through recent 'AAPL' search results until reaching
    the id seen on the previous run.

    Updates self.lastId / self.previousLast so the next call resumes where
    this one stopped.
    """
    count = 0
    api = TwitterAPI(self.consumerKey, self.consumerSecret, self.AccessKey,
                     self.AccessToken, auth_type='oAuth2')
    # Seed the walk with the single most recent matching tweet.
    r = api.request('search/tweets', {'q': 'AAPL', 'count':1,
                                      'result_type': 'recent'})
    for item in r:
        count+=1
        print(count, item['id'], "\t",
              item['created_at'] if 'text' in item else item + '\n')
        self.lastId = item['id']-1
        temp = item['id']
    # Walk older pages via max_id until the previously-seen id is reached.
    # NOTE(review): indentation reconstructed — confirm the final
    # `self.previousLast = temp` sits after the while-loop, not inside it.
    while self.lastId > (self.previousLast + 1):
        r = api.request('search/tweets', {'q': 'AAPL', 'count':10,
                                          'result_type': 'recent',
                                          'max_id': self.lastId})
        for item in r:
            count+=1
            print(count, item['id'], "\t",
                  item['created_at'] if 'text' in item else item + '\n')
            print ("Done up to: ", item['id'], "\t", self.lastId, "\t",
                   self.previousLast)
            self.lastId = item['id'] - 1
    self.previousLast = temp
    print ("Done upto ", self.previousLast)
def func2(i):
    """Worker thread *i*: stream tweets tracking 'pizza' and append a
    running count to example2.txt for each match."""
    print('hello from thread %s\n' % i)
    TRACK_TERM = 'pizza'
    # NOTE(review): hard-coded credentials should move to config/env vars.
    ACCESS_TOKEN_KEY = "91070076-hXHCPWZNQiJ1LwwUFZLzjGJXMZLwoJkbZLKVCi6jx"
    ACCESS_TOKEN_SECRET = "V0zScYzfuKBTl0VWWHpG84Y34gNGkIN80PDs5eQsnEdgO"
    CONSUMER_KEY = "vVypmaitmk21exXEo22Bi691H"
    CONSUMER_SECRET = "9wWmNH6UgDafRuZKTZ02G7arEf9YsJfH2oYanaE6itk0FyMFKA"
    api = TwitterAPI(
        CONSUMER_KEY, CONSUMER_SECRET, ACCESS_TOKEN_KEY, ACCESS_TOKEN_SECRET)
    r = api.request('statuses/filter', {'track': TRACK_TERM})
    c = 0
    with open("example2.txt", "w+") as ed:
        # BUG FIX: the loop variable was also named `i`, silently
        # clobbering the thread-id parameter.
        for tweet in r:
            # ed.write(str(tweet["entities"]["hastags"])+"\n")
            if "pizza" in tweet["text"]:
                c+=1
                ed.write(str(c)+"," +str(c*3)+"\n")
def handle(self, *args, **options):
    """For each match starting in the last half hour, tweet the admins'
    predictions and the most-predicted results."""
    api = TwitterAPI(**settings.TWITTER_CREDENTIALS)
    users = settings.EGA_ADMINS.keys()
    # matches in the last hour
    matches = Match.objects.filter(
        when__isnull=False,
        when__range=(now() - timedelta(minutes=30), now()))
    for m in matches:
        predictions = m.prediction_set.filter(
            user__username__in=users).order_by('user')
        if predictions:
            # One line per admin with a complete home/away prediction.
            data = ', '.join([
                "%s dice %s %d-%d %s" % (
                    settings.EGA_ADMINS[p.user.username],
                    m.home.code, p.home_goals, p.away_goals, m.away.code)
                for p in predictions
                if p.home_goals is not None and p.away_goals is not None])
            tweet = u"En juego: %s vs %s\n%s" % (
                m.home.name, m.away.name, data)
            api.request('statuses/update', {'status': tweet})
        # get predictions count and tweet
        # NOTE(review): indentation reconstructed — confirm whether this
        # stats tweet belongs inside the `if predictions:` block.
        counts = get_top_predictions(m)
        preds = '\n'.join([
            '#%s %d - %d #%s (%d)' % (m.home.code, r[0], r[1],
                                      m.away.code, r[2])
            for r in counts
        ])
        tweet = 'Los resultados más pronosticados:\n' + preds
        api.request('statuses/update', {'status': tweet})
def twitterScraper(bias, url):
    """Build an HTML page of embedded tweets about *url*'s top keywords,
    with each search biased by the *bias* term."""
    try:
        ##Load keys from other file and initialize the APIS
        consumer_key, consumer_secret, access_token_key, access_token_secret = getKeys()
        api = TwitterAPI(consumer_key, consumer_secret,
                         access_token_key, access_token_secret)
        alchemyAPI = AlchemyAPI()
        ##Get the list of keywords for the page
        response = alchemyAPI.keywords('url', url, {'sentiment': 1})
        ##Keep at most five sufficiently-relevant keywords
        keys = []
        for candidate in response['keywords']:
            if float(candidate['relevance']) > 0.6:
                keys.append(candidate['text'])
                if len(keys) >= 5:
                    break
        htmlString = "<body>\n"
        ##Get some tweets for each of the relevant keywords
        for key in keys:
            key += ' ' + bias
            tweetList = api.request('search/tweets', {'q': str(key), 'lang': 'en'})
            seen_prefixes = []  ## redundant tweets
            for x in tweetList:
                ##Skip tweets whose first 20 chars we've already embedded
                if x['text'][:20] in seen_prefixes:
                    continue
                seen_prefixes.append(x['text'][:20])
                ##Skip non english
                if x['lang'] != 'en':
                    continue
                if x['text'][:2] == "RT":
                    continue
                htmlString += "<blockquote class=\"twitter-tweet\" lang=\"en\"><p>"
                htmlString += x['text']
                htmlString += "</p>—"
                htmlString += x['user']['name']
                htmlString += " (@"
                htmlString += x['user']['screen_name']
                htmlString += ") <a href=\"https://twitter.com/"
                htmlString += x['user']['screen_name']
                htmlString += "/status/"
                htmlString += str(x['id'])
                htmlString += "\">"
                htmlString += "</a></blockquote> <script async src=\"http://platform.twitter.com/widgets.js\" charset=\"utf-8\"></script>\n"
        htmlString += "</body>"
        return htmlString
    except Exception as e:
        return "There was some kind of error, Go home"
def startStream(self, global_keywords, local_keywords, callback):
    """Consume the statuses/filter stream for the combined keyword set,
    invoking *callback* once per unseen tweet.

    Reconnects on errors with a growing pause bounded by min/max_recon_pause
    and runs until self.streaming goes False.
    """
    self.global_keywords = global_keywords
    self.local_keywords = local_keywords
    last_connection = None
    reconnection_pause = 1  # in seconds (scaled back after long stable runs)
    min_recon_pause = 1  # in seconds
    max_recon_pause = 60*10  # 10 minutes!
    receivedTweetIDs = set([])  # de-duplicate tweets across reconnects
    self.streaming = True
    while self.streaming:
        try:
            # BUG FIX: last_connection was never assigned, so the backoff
            # arithmetic in the except-block raised TypeError (None minus
            # datetime) on the very first error.
            last_connection = datetime.now()
            terms = set(self.global_keywords + self.local_keywords)
            logging.info("Starting connection with terms: \n" + pprint.pformat(terms))
            api = TwitterAPI(self.consumer_key, self.consumer_secret,
                             self.access_key, self.access_secret)
            r = api.request('statuses/filter', {'track': ','.join(terms)})
            logging.info("Response Status Code: %d" % (r.status_code))
            # see https://dev.twitter.com/docs/streaming-apis/connecting
            # for details about this code
            if r.status_code == 200:  # Success
                for tweet in r.get_iterator():
                    if ('id' in tweet) and (tweet['id'] not in receivedTweetIDs):
                        receivedTweetIDs.add(tweet['id'])
                        # self._processTweet(tweet)
                        callback(tweet)
            elif r.status_code in STATUS_CODE_MESSAGES:
                # MODERNIZED: dict.has_key() was removed in Python 3.
                logging.info(STATUS_CODE_MESSAGES[r.status_code])
                if r.status_code == 503:
                    time.sleep(60*5)  # wait 5 minutes
                else:
                    return
            else:
                logging.info("Received unknown status code: %d" % r.status_code)
        except Exception as e:
            # MODERNIZED: `except Exception, e` is Python-2-only syntax.
            logging.error("Twitter ConnectionError. Reason: %s\n" % (e))
            logging.info("It should reconnect automatically\n")  # hacky!
            time_from_last_attempt = (datetime.now() - last_connection).seconds
            if time_from_last_attempt > max_recon_pause:
                reconnection_pause = min_recon_pause
            # Pause before trying to reconnect
            # FIXME: this was a quick hack! We should follow the guidelines
            # suggested in
            # https://dev.twitter.com/docs/streaming-apis/connecting
            logging.debug("Will wait %d seconds before attempting to reconnect ..." % (reconnection_pause))
            time.sleep(reconnection_pause)
            if time_from_last_attempt < max_recon_pause:
                reconnection_pause *= 2
def _search_twitter(query):
    """Return the text of up to ten tweets matching *query*."""
    api = TwitterAPI(os.environ.get("TWITTER_CONSUMER_KEY"),
                     os.environ.get("TWITTER_CONSUMER_SECRET"),
                     os.environ.get("TWITTER_ACCESS_TOKEN"),
                     os.environ.get("TWITTER_ACCESS_TOKEN_SECRET"))
    response = api.request('search/tweets', {'q': query})
    texts = [item['text'] for item in response]
    return texts[:10]
def _getTweets(user):
    """Return a JSON Response listing the text of *user*'s recent tweets."""
    api = TwitterAPI("aiC1HsGnI81CrWQ78ejw",
                     "244t73B6eDybGbxPrqcxfXMjdfy3OBeqKndcnBakE5M",
                     "237561704-JIg6SthgLfZge8naa7Pun3ANpSo3r0BprtlrLFto",
                     "7FGLhL5UFW0F1RWRraF3HqPvY5cvhmOXMQt2FKjxC0")
    timeline = api.request('statuses/user_timeline', {'screen_name': user})
    texts = [entry['text'] for entry in timeline]
    return Response(json.dumps(texts), mimetype='application/json')
print(count + skip + total_skip) elif 'limit' in item: skip = item['limit'].get('track') #print('\n\n\n*** Skipping %d tweets\n\n\n' % (total_skip + skip)) elif 'disconnect' in item: raise Exception('Disconnect: %s' % item['disconnect'].get('reason')) except Exception as e: print('*** MUST RECONNECT %s' % e) total_skip += skip if __name__ == '__main__': parser = argparse.ArgumentParser(description='Count occurance of word(s).') parser.add_argument('-past', action='store_true', help='search historic tweets') parser.add_argument('-oauth', metavar='FILENAME', type=str, help='read OAuth credentials from file') parser.add_argument('words', metavar='W', type=str, nargs='+', help='word(s) to count the occurance of') args = parser.parse_args() oauth = TwitterOAuth.read_file(args.oauth) api = TwitterAPI(oauth.consumer_key, oauth.consumer_secret, oauth.access_token_key, oauth.access_token_secret) try: if args.past: count_old_tweets(api, args.words) else: count_new_tweets(api, args.words) except KeyboardInterrupt: print('\nTerminated by user\n') except Exception as e: print('*** STOPPED %s\n' % e)
def main():
    """Entry point: optionally process authorization DMs, then tweet
    per-site and aggregated daily gender statistics."""
    # argument parsing
    args = vars(parse_arguments())
    # logging
    logging.basicConfig(
        filename=LOG_FOLDER + 'columnistos.log',
        format='%(asctime)s %(name)s %(levelname)8s: %(message)s',
        level=logging.INFO)
    logging.getLogger("requests").setLevel(logging.WARNING)
    logging.info('Starting script')
    # keys
    consumer_key = os.environ['TWITTER_CONSUMER_KEY']
    consumer_secret = os.environ['TWITTER_CONSUMER_SECRET']
    access_token = os.environ['TWITTER_ACCESS_TOKEN']
    access_token_secret = os.environ['TWITTER_ACCESS_TOKEN_SECRET']
    api = TwitterAPI(consumer_key, consumer_secret,
                     access_token, access_token_secret)
    auth_ids = screen_names_to_id(api, AUTHORIZED_SCREEN_NAMES)
    db = dataset.connect(SQLITE_URL)
    # DMs
    if args['dm']:
        logging.info('Checking if DM needed')
        logging.info('Need to send/process DM')
        check_dms(api, auth_ids)
        data_to_send = get_author_no_gender()
        send_dms(api, data_to_send, auth_ids)
    if args['tweet']:
        logging.info('Checking if ready to tweet')
        # Seed the sites table on first run from sites seen in articles.
        if 'sites' not in db.tables:
            sites = db.create_table('sites')
            sites_in_articles = db['articles'].distinct('site')
            for row in sites_in_articles:
                sites.insert(dict(name=row['site'], last_checked_id=0))
        daily_stats = list()
        for row in db['sites']:
            stats = get_stats(row)
            if stats:
                daily_stats.append(stats)
                if stats['total'] > MIN_NEW_ARTICLES:
                    text_to_tweet = select_text(stats)
                    logging.info('About to tweet for {} with {}'.format(
                        row['name'], text_to_tweet))
                    if text_to_tweet and tweet_text(api, text_to_tweet):
                        logging.info('Individual tweet for {} sent'.format(
                            row['name']))
                else:
                    logging.info(
                        'Not enough new articles for {}, stats: {}'.format(
                            row['name'], stats))
                # update last_checked_id
                db['sites'].update(dict(name=row['name'],
                                        last_checked_id=stats['last_id']),
                                   ['name'])
        # Aggregate resume across every site that produced stats today.
        if len(daily_stats) > 0:
            text_to_tweet = daily_tweet(daily_stats)
            if text_to_tweet and tweet_text(api, text_to_tweet):
                logging.info('Resume tweeted')
            else:
                logging.warning('Resume failed')
        else:
            logging.info('No resume to send')
    logging.info('Script finished')
import Tkinter as tk import webbrowser from TwitterAPI import TwitterAPI import csv import wikipedia as wiki import datetime as dt import time, ast, threading import matplotlib.pyplot as plt from math import sqrt from watson_developer_cloud import NaturalLanguageUnderstandingV1 import watson_developer_cloud.natural_language_understanding.features.v1 as Features api = TwitterAPI(consumer_key='', consumer_secret='', access_token_key='', access_token_secret='') natural_language_understanding = NaturalLanguageUnderstandingV1( version='2017-02-27', username="", password="") #ghetto config file LOCAL_CSV_FILE = 'output.csv' user_depth = 5 startTweetID = 926311454810112000 class Application(tk.Frame): def createDefaultWidgets(self): self.companybutton = tk.Button(self, text="Company") self.companybutton.pack()
# SNS-triggered AWS Lambda that retweets news items.
# NOTE(review): collapsed and truncated in this view — handle() is cut off
# mid-logging-statement; 'os' and 'json' are imported outside this view.
from TwitterAPI import TwitterAPI
from message import RetweetMessage
from news_bot_config import NewsBotConfig

# env_vars
stage = os.environ['Stage']
config_bucket = os.environ['ConfigBucket']
config_key_name = os.environ['ConfigKeyName']
consumer_key = os.environ['TwitterConsumerKey']
consumer_secret = os.environ['TwitterConsumerSecret']
access_token_key = os.environ['TwitterAccessTokenKey']
access_token_secret = os.environ['TwitterAccessTokenSecret']

# api clients — created at module load so warm Lambda invocations reuse them
twitter = TwitterAPI(consumer_key, consumer_secret, access_token_key,
                     access_token_secret)


def lambda_handler(event, _):
    # Lambda entry point: load the stage's config from S3, then delegate.
    config = NewsBotConfig.initialize(stage, config_bucket, config_key_name)
    return handle(event, config)


def handle(event: dict, config: NewsBotConfig):
    # Collect the SNS MessageIds from the incoming event records.
    records = event['Records']
    ids = []
    for r in records:
        mid = r['Sns']['MessageId']
        ids.append(mid)
    # NOTE(review): truncated below — the logging call is incomplete.
    config.logger.info(
        json.dumps({
# CLI: dump all past tweets of @screen_name into pasttweets/<name>.txt.
# NOTE(review): collapsed and truncated — the loop body is cut off; 'sys',
# 'os' and the local TwitterAPI wrapper class come from outside this view.
# (Original Japanese comments translated to English.)
from Utils import Utils

if __name__ == '__main__':
    # Check arguments
    param = sys.argv
    if (len(param) != 2):
        print(('Usage: $ python ' + param[0] + ' @screen_name'))
        exit()
    # Set the screen name (strip the leading '@')
    screen_name = param[1].replace('@', '')
    print('Screen name: @' + screen_name)
    # Initialise the wrapper class
    instance = TwitterAPI()
    # Get the account's tweet count
    user_tweets_count = instance.get_user_tweets_count(screen_name)
    # Recursively fetch the account's full timeline
    user_timeline = instance.get_user_timeline(screen_name)
    # Write to a file next to this script
    file_name = os.path.dirname(os.path.abspath(__file__)).replace(
        '\\', '/') + f'/pasttweets/{screen_name}.txt'
    file_contents = ''
    for index, tweet in enumerate(user_timeline):
        # Tweet ID and body
        tweet_id = tweet['status']['data']['id_str']
# Stream tweets matching a keyword (argv[1]) from statuses/filter and dump
# each JSON payload to stdout until the user interrupts with Ctrl-C.
import json
import pprint
import sys
from TwitterAPI import TwitterAPI
from sys import argv
# import numpy as np
# import matplotlib.pyplot as plt
# import matplotlib

try:
    keyword = argv[1]
    # NOTE(review): placeholder — real consumer/access keys must be supplied.
    api = TwitterAPI("insert appropriate keys here.")
    # NOTE(review): nothing ever appends to this list, so the
    # KeyboardInterrupt summary loop below prints nothing.
    tweets = []
    #print "Starting Twitter miner"
    r = api.request('statuses/filter', {'track': keyword})
    for item in r.get_iterator():
        try:
            # print() with one argument is identical on Python 2 and 3,
            # unlike the original Python-2-only print statements.
            print(json.dumps(item, sort_keys=True))
        except Exception:
            # Best-effort: skip items that fail to serialise/print.
            # Was a bare `except:`, which also swallowed KeyboardInterrupt
            # and prevented the clean-shutdown handler below from running.
            pass
except KeyboardInterrupt:
    #print('\nTerminated by user')
    for sample in tweets:
        try:
            print(sample['text'] + "end")
        except Exception:
            pass
    #print "Exiting"
except Exception as e:
    print('*** STOPPED %s' % str(e))
# Thin Twitter helper honouring the app config's verbose/localMode switches.
# NOTE(review): truncated in this view — udpate_name()'s body is cut off.
from TwitterAPI import TwitterAPI
from config import get as get_config

config = get_config()

# Setup twitter api credentials
api = TwitterAPI(config['twitter']['consumer_key'],
                 config['twitter']['consumer_secret'],
                 config['twitter']['access_token_key'],
                 config['twitter']['access_token_secret'])


def tweet(text):
    # Tweet the event if localMode is false in config.
    # Text is truncated to the configured maxChars; returns the new tweet's
    # id_str, or None when localMode suppresses the post.
    if config['main']['verbose'] == True:
        print('Twitter.py - Tweet: ' + text)
    if config['main']['localMode'] == False:
        r = api.request('statuses/update',
                        {'status': text[:config['twitter']['maxChars']]})
        return r.json()['id_str']


def udpate_name(iteration_number):
    # Renames the account to "<baseName> #<iteration_number>".
    # NOTE(review): "udpate" is a typo, but renaming would break callers.
    new_name = config['twitter']['baseName'] + ' #' + str(iteration_number)
    if config['main']['verbose'] == True:
        print('Twitter.py - Update account name: ' + new_name)
    if config['main']['localMode'] == False:
# NOTE(review): head truncated — this begins inside a parser.add_argument(...)
# call whose opening (and the parser construction) is outside this view.
        '-text',
        metavar='NAME_VALUE',
        type=str,
        help='text to send',
        default='My picture')
args = parser.parse_args()
try:
    # Read OAuth credentials and the image, then tweet the picture.
    oauth = TwitterOAuth.read_file(args.oauth)
    # NOTE(review): shadows the (Py2) builtin 'file' and is closed manually;
    # a with-block would be safer if an exception hits mid-read.
    file = open(args.file, 'rb')
    data = file.read()
    file.close()
    api = TwitterAPI(oauth.consumer_key, oauth.consumer_secret,
                     oauth.access_token_key, oauth.access_token_secret)
    r = api.request('statuses/update_with_media', {'status': args.text},
                    {'media[]': data})
    print(r.status_code)
except KeyboardInterrupt:
    print('Terminated by user')
except Exception as e:
    print('STOPPED: %s' % e)
# Minimal example: open a statuses/filter stream tracking "Lava Jato" and
# print every raw item received.
from TwitterAPI import TwitterAPI

# NOTE(review): placeholder credentials must be replaced before running.
twitter_api = TwitterAPI(consumer_key='xxxxxx',
                         consumer_secret='xxxxxx',
                         access_token_key='xxxxxx',
                         access_token_secret='xxxxxx')

filters = {"track": ["Lava Jato"]}
stream = twitter_api.request('statuses/filter', filters).get_iterator()

for item in stream:
    # print(x) with a single argument behaves identically on Python 2 and 3;
    # the original Python-2-only `print item` statement was a SyntaxError
    # under Python 3.
    print(item)
#import checkDMs as checkDMs import weather as weather import getWordOfDay as word import traceback import sys import time #@3030team3 consumer_key = "ThbfGVBrpRwMKu9FVgR6HjA1m" consumer_secret = "lGyzD69pQGupB4lTG3jWG8rszYmVN4CGjFPYGUBTr1EhKdiBxh" access_key = "1039183691510165505-IkoKTm8MopQ3PzYVmEgU2NdGmognPL" access_secret = "jwWe8WqunRcmqKWgvYyDuUjkyotgfUddeLKcTHYz40ktP" twitterID = "1039183691510165505" MAX_SLEEP_TIME_BETWEEN_EVENTS = 1 #Event if an event is longer than this time away, the script will recheck for newer events after this minutes. api = TwitterAPI(consumer_key, consumer_secret, access_key, access_secret) tryCounter = 0 def tick(): #print(database.addMessage("sendDM.py", "online", "Script sendDM.py is still working.")) global tryCounter tryCounter = tryCounter + 1 print("Next event triggers at " + str(getNextTime(True))) pause.until(getNextTime(True)) event = database.nextEvent() if getNextTime(False) < datetime.datetime.now(): print(event) try: if (event["eventType"] == "localWeather"):
# Stream tweets matching TRACK_TERM and forward each tweet's text over TCP
# to a downstream consumer.
import json
import socket
from TwitterAPI import TwitterAPI

if __name__ == "__main__":
    # SECURITY(review): live credentials hard-coded in source — rotate them
    # and load from the environment instead.
    consumer_api_key = "heaViJzA1YzxP8p1fMVnkG7C7"
    consumer_api_secret = "OD9OaDIm7K5ab4zMSisSE1eW3o7cjulhfj5CwV3jmxjJfEPRus"
    access_api_key = "230085799-4kYtfW4OKceG6YdXEqEnrvqKAdjGXIdv4iRxoWTq"
    access_api_secret = "3zBMj7bcX1giVqvWMNuVVE8ghF5j8AlndaLjftUWwLR5o"
    api = TwitterAPI(consumer_api_key, consumer_api_secret, access_api_key,
                     access_api_secret)
    TRACK_TERM = '#'

    # Act as a TCP *client*: connect to the downstream consumer and push
    # tweet texts to it.  BUG FIX: the original connect()ed and then also
    # bind()/listen()ed on the very same socket — a socket is either a
    # connected client or a listening server, never both, so the bind()
    # raised OSError before any tweet was ever sent.
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server_address = ('192.168.56.1', 7777)
    sock.connect(server_address)

    r = api.request('statuses/filter', {'track': TRACK_TERM})
    for item in r:
        # Streams also deliver control messages without a 'text' field;
        # skip those instead of raising KeyError.
        if 'text' in item:
            sock.send(item['text'].encode('utf-8'))
def get_twitter():
    """Return a TwitterAPI client authenticated with the module-level
    consumer/access credentials."""
    client = TwitterAPI(
        consumer_key,
        consumer_secret,
        access_token,
        access_token_secret,
    )
    return client
import json

from TwitterAPI import TwitterAPI

# App-only (bearer-token) auth: only the consumer credentials are needed,
# so the access-token pair is omitted entirely.
consumer_key = ''
consumer_secret = ''

api = TwitterAPI(consumer_key, consumer_secret, auth_type='oAuth2')

# Look up a single user and dump the raw response for inspection.
params = {'screen_name': 'tiger'}
response = api.request('users/show', params)

print("status", response.status_code)
print("headers", response.headers)
print(json.dumps(response.json(), indent=2))
#author Rohan Subramaniam from __future__ import print_function import json from TweetInfo import TweetInfo from TwitterAPI import TwitterAPI from alchemyapi import AlchemyAPI from TwitterKeys import CONSUMER_KEY, CONSUMER_SECRET, ACCESS_TOKEN_KEY, ACCESS_TOKEN_SECRET api = TwitterAPI(CONSUMER_KEY, CONSUMER_SECRET, ACCESS_TOKEN_KEY, ACCESS_TOKEN_SECRET) Team1 = 'Arsenal' count1 = 50 until = "2015-10-06" lang = 'en' geocode1 = "51.5072,0.1275,100mi" #Geocode of london since = '2015-10-04' team1_text = "" team1_list = [] r = api.request( 'search/tweets', { 'lang': lang, 'q': Team1, 'count': count1, 'geocode': geocode1, 'since': since, 'until': until })
import os

from TwitterAPI import TwitterAPI

# All four credentials come from the environment; a missing variable fails
# fast with KeyError at import time instead of a cryptic 401 later.
CONSUMER_KEY, CONSUMER_SECRET, OAUTH_TOKEN, OAUTH_SECRET = (
    os.environ[name]
    for name in ('CONSUMER_KEY', 'CONSUMER_SECRET', 'OAUTH_TOKEN',
                 'OAUTH_SECRET'))

# Shared, module-level client used by the rest of the app.
api = TwitterAPI(CONSUMER_KEY, CONSUMER_SECRET, OAUTH_TOKEN, OAUTH_SECRET)
# Builds an arithmetic-coding model from sampled symbol frequencies, then
# codes recent tweet text.  NOTE(review): head/tail truncated — 'muestreo',
# 'frecuencia_total', 'leerarchivo', 'CodigoAritm' and the error classes are
# defined outside this view.
muestreo = np.array([par.split(' | ') for par in muestreo])
alfabeto = muestreo[:, 0].ravel()
probabilidades = muestreo[:, 1].ravel()
alfabeto = ' | '.join(alfabeto)
# Normalise the raw counts into probabilities.
probabilidades = ' | '.join(
    [str(int(i) / frecuencia_total) for i in probabilidades])
muestreo = leerarchivo('resources/lexico.txt')
lexico = [par.split(',') for par in muestreo]

# SECURITY(review): live credentials hard-coded in source — rotate them and
# move to the environment or a config file.
consumer_key = 'kxfJjFCXjkRySLkW2aHGeAXxN'
consumer_secret = 'VKalY6au6029H5uqo63VHH1VWcYwaBmlJ36EPulYUBmThyvDUi'
access_key = '1576798795-MJcRA8Yu8nfgDWbIQjshgio6bOoBCBOGZbSOF06'
access_secret = 'jPVa8ELVIDT2StlNJvts6UmZASllsliVdvHg7VikT88ew'
api = TwitterAPI(consumer_key, consumer_secret, access_key, access_secret)
# Fetch 100 recent Spanish tweets matching 'abarca' as coding input.
respuesta = api.request('search/tweets', {
    'q': 'abarca',
    'count': '100',
    'lang': 'es'
})
muestreo = []
for item in respuesta.get_iterator():
    # Drop characters outside latin-1, then decode back to str.
    texto = item['text'].encode('latin-1', 'ignore')
    muestreo.append(texto.decode('latin-1'))
try:
    inst = CodigoAritm(alfabeto, probabilidades)
except SimbProbsError as e:
    print(e)
except ItemVacioError as e:
# CLI tool: fetch a user's timeline via statuses/user_timeline.
# NOTE(review): truncated — parse_arg() is cut off mid-add_argument.
import json
import argparse
import time
from TwitterAPI import TwitterAPI
import config
import utils

API = 'statuses/user_timeline'
api = TwitterAPI(config.API_KEY, config.API_SECRET_KEY, config.ACCESS_TOKEN,
                 config.ACCESS_TOKEN_SECRET)


def parse_arg():
    # Build the CLI.  NOTE(review): the variable named 'args' actually holds
    # the ArgumentParser itself, not parsed arguments.
    args = argparse.ArgumentParser(description="get user timeline.")
    args.add_argument("-f",
                      "--filename",
                      type=str,
                      help="specify output JSON filename.")
    args.add_argument("-u",
                      "--user_id",
                      type=int,
                      help="specify user by user_id.")
    args.add_argument("-s",
                      "--screen_name",
                      type=str,
                      help="specify user by screen_name.")
    args.add_argument("-n",
                      "--num_tweets",
                      type=int,
                      default=200,
# -*- coding: utf-8 -*- #!/usr/bin/python from TwitterAPI import TwitterAPI import sys import time from auth_robotgimse import (consumer_key, consumer_secret, access_token, access_token_secret) stringToTrack = "#test12345" tweet_count = 0 api = TwitterAPI(consumer_key, consumer_secret, access_token, access_token_secret) while True: r = api.request('statuses/filter', {'track': stringToTrack}) print('Twitter ready!') print(stringToTrack) print("----") for item in r: tweet = item['text'] user = item['user']['screen_name'] print(user) tweet_count = tweet_count + 1
db.tweet_raw_data.create_index([('id_str', pymongo.ASCENDING)], unique=True) #In[]: #ดึงข้อมูล keyword ที่อยู่ใน json with io.open("keywordSearch.json", encoding="utf-8") as json_file: twData = json.load(json_file) #In[]: #ประกาศ API ที่ใช้งาน api = [] with io.open("searchAPI.json", encoding="utf-8") as json_file: apiData = json.load(json_file) for allApi, token in apiData.items(): api.append( TwitterAPI(token['consumer_key'], token['consumer_secret'], token['access_token_key'], token['access_token_secret'])) #In[]: def getRawTwitter(): rawData = [] for i in range(len(api)): try: t0 = time.time() for key, value in twData.items(): for val in value: kw = val + " -filter:retweets" r = api[i].request( 'search/tweets', { 'q': kw, 'lang': 'th',
# Searches the premium 30-day endpoint for "donate per retweet" tweets.
from TwitterAPI import TwitterAPI
import json
import urllib.parse
import pandas as pd
import authentication as auth
from pandas.io.json import json_normalize

api = TwitterAPI(auth.consumer_key, auth.consumer_secret, auth.access_token,
                 auth.token_secret)

# With sandbox, our query can be 256 characters long -- noting if we ever encounter weird error
params = {
    "query":
    "((we\'ll OR (we will)) donate for (every OR each) (retweet OR RT)) OR (for (every OR each) (RT OR retweet) this tweet gets ((we will) OR we\'ll) donate) OR (for (every OR each) (RT OR retweet) ((we will) OR we\'ll) donate)"
}
# do the parentheses check out?
#yeah i tink so yeah looks good
# YES for now HYPEEE
# should we try this? I'm sure there are others but let's see if it works for now
# RT / retweet; we'll / we will; every / each
# is there a clean way to say "for each RT/retweet"?
# yeah lets do that thats sMORT
# maybe "for each (RT OR retweet) (we\'ll OR (we will)) donate"
#print ((params["query"]))
#print (len(params["query"]))

# ':Development' is the premium environment label configured for this app.
request = api.request('tweets/search/30day/:Development', params)
# Status code will tell us if it worked - 200 means its gucci
# Forwards tweets matching stringToTrack to a micro:bit over serial.
# Python 2 (print statements).  NOTE(review): head/tail truncated — serial
# config, credentials and the rest of the loop live outside this view;
# indentation reconstructed, so verify the if/else pairing against the original.
tweet_number = 0
if availableMicrobit:
    ser = Serial(microbitPort, microbitBaud, timeout=3)
    sleep(microbitWaitTime)
    if testSerial:
        print "Send to micro:bit"
        sleep(2)
        ser.write('\r\n')
else:
    print 'Twitter connection!'

try:
    api = TwitterAPI(consumer_key, consumer_secret, access_token,
                     access_token_secret)
    print 'Twitter ready!'
    r = api.request('statuses/filter', {'track': stringToTrack})
    for item in r.get_iterator():
        if 'text' in item:
            #print item['user']['screen_name'].encode('utf-8') + ' tweeted: ' + item['text'].encode('utf-8')
            if availableMicrobit:
                #print "New tweet send to micro:bit with " + stringToTrack
                #ser.write(bytes(1)) # The command is a simple byte intepretation of the integer 1
                # NOTE(review): strip() removes tracked-string *characters*
                # from both ends, not the substring — confirm the intent.
                tweet = item['text'].encode('utf-8').strip(
                    stringToTrack) + '\r\n'
                #ser.write(tweet)
# Tweet-classification experiment scaffolding (SVM / LR / RF / NB).
# NOTE(review): head/tail truncated — earlier imports ('datetime',
# 'stopwords') and the rest of stopwords_removal() are outside this view.
from sklearn import svm
from sklearn.feature_extraction.text import *
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import auc, roc_curve
import matplotlib.pyplot as plt
import matplotlib
# NOTE(review): matplotlib.use() after importing pyplot may come too late to
# switch the backend.
matplotlib.use('Agg')

t1 = datetime.now()
# Empty credentials — must be filled in before any API call succeeds.
api = TwitterAPI('', '', '', '')


def load_data(filename):
    # Read the file into a list, one rstrip()ped line per entry.
    # NOTE(review): the handle is never closed — consider a with-block.
    data = []
    op = open(filename, "r")
    for txt in op:
        data.append(txt.rstrip())
    return data


def stopwords_removal(current_twt):
    # Lower-cases the tweet and filters out words found in the (externally
    # defined) stopwords collection.
    updated_twt = ' '
    current_twt = current_twt.lower()
    for twt_word in current_twt.encode("utf-8").split():
        if twt_word not in stopwords:
import os

from TwitterAPI import TwitterAPI

# Registers WEBHOOK_URL with Twitter's Account Activity API.
# Configuration is environment-driven; unset variables come back as None.
CONSUMER_KEY = os.environ.get('CONSUMER_KEY')
CONSUMER_SECRET = os.environ.get('CONSUMER_SECRET')
ACCESS_TOKEN = os.environ.get('ACCESS_TOKEN')
ACCESS_TOKEN_SECRET = os.environ.get('ACCESS_TOKEN_SECRET')
#The environment name for the beta is filled below. Will need changing in future
ENVNAME = os.environ.get('ENVNAME')
WEBHOOK_URL = os.environ.get('WEBHOOK_URL')

twitterAPI = TwitterAPI(CONSUMER_KEY, CONSUMER_SECRET, ACCESS_TOKEN,
                        ACCESS_TOKEN_SECRET)

# Report the API's verdict so a misconfigured URL is visible immediately.
endpoint = 'account_activity/all/:%s/webhooks' % ENVNAME
r = twitterAPI.request(endpoint, {'url': WEBHOOK_URL})
print(r.status_code)
print(r.text)
class Twitter:
    """
    Simplified proxy to TwitterAPI implementing functions required by the application
    """

    def __init__(self, config):
        # Credentials come from the [twitter] section of the supplied config.
        self.__api = TwitterAPI(config.get("twitter", "twitter.consumer_key"),
                                config.get("twitter", "twitter.consumer_secret"),
                                config.get("twitter", "twitter.access_token"),
                                config.get("twitter", "twitter.access_secret"))

    def verify_credentials(self):
        """
        Validates current API credentials
        :return: True if an HTTP status code of 200 is returned from Twitter
        """
        r = self.__api.request('account/verify_credentials')
        return r.status_code == 200

    def tweet_text(self, status, respond_to_user=None, respond_to_id=None):
        """
        Sends a text-only tweet
        :param status: The tweet text to be posted
        :param respond_to_user: User being responded to. None if not a response.
        :param respond_to_id: Tweet being responded to. None if not a response.
        :return: True if the Twitter API returned an HTTP 200 status.
        """
        # NOTE(review): despite the docstring, this method returns None —
        # the outcome is only printed.
        if respond_to_user is not None:
            status = "Hi, %s\n\n%s"%(respond_to_user, status)
        r = self.__api.request('statuses/update',
                               {'status': status,
                                'in_reply_to_status_id': respond_to_id})
        print('UPDATE STATUS SUCCESS' if r.status_code == 200 else 'UPDATE STATUS FAILURE: ' + r.text)

    def tweet_image(self, title, source, shortened_image_link,
                    image_path="image.jpg", respond_to_user=None,
                    respond_to_id=None):
        """
        Tweets an image and Flickr photo title
        :param title: The photo title
        :param source: The Flickr source dict
        :param shortened_image_link: A URL to the source image on Flickr
        :param image_path: A path to the image to be tweeted
        :param respond_to_user: A user to be responded to. None if not a response.
        :param respond_to_id: ID of the tweet being responded to. None if not a response.
        :return: True if the Twitter API returned an HTTP 200 status.
        """
        # NOTE(review): like tweet_text, this returns None — outcome is printed.
        username = source.get_flickr_username()
        twitter_id = source.get_twitter_id()
        if respond_to_user is None:
            text = "%s - From %s (%s) - %s"%(title, username, twitter_id, shortened_image_link)
        else:
            text = "Hi, %s\n\n%s - From %s (%s) - %s" % (respond_to_user, title, username, twitter_id, shortened_image_link)
        data = Util.load_image_data(image_path)
        # Two-step flow: upload the media first, then attach its id to the
        # status update.
        r = self.__api.request('media/upload', None, {'media': data})
        print('UPLOAD MEDIA SUCCESS' if r.status_code == 200 else 'UPLOAD MEDIA FAILURE: ' + r.text)
        if r.status_code == 200:
            media_id = r.json()['media_id']
            r = self.__api.request('statuses/update',
                                   {'status': text,
                                    'media_ids': media_id,
                                    'in_reply_to_status_id': respond_to_id})
            print('UPDATE STATUS SUCCESS' if r.status_code == 200 else 'UPDATE STATUS FAILURE: ' + r.text)

    def get_mentions(self, since_id=None, count=100):
        """
        Retrieves a list of recent Twitter mentions since the ID of the provided tweet.
        :param since_id: An identifier of the last reviewed mention (on last run)
        :param count: A maximum of mentioned to be returned by the API
        :return: A list of tweets
        """
        params = {'count': count}
        if since_id is not None and since_id > 0:
            params["since_id"] = since_id
        r = self.__api.request('statuses/mentions_timeline', params)
        if r.status_code != 200:
            print('retrieval failure: ' + r.text)
            raise Exception('retrieval failure: ' + r.text)
        return r.json()
def __init__(self, config):
    # Build the private TwitterAPI client from the [twitter] section of the
    # supplied config object (consumer key/secret + access token/secret).
    # NOTE(review): appears to be a method whose class header is outside
    # this view — duplicate of the Twitter.__init__ defined above.
    self.__api = TwitterAPI(config.get("twitter", "twitter.consumer_key"),
                            config.get("twitter", "twitter.consumer_secret"),
                            config.get("twitter", "twitter.access_token"),
                            config.get("twitter", "twitter.access_secret"))
# Django views for a simple tweet-search page.
# NOTE(review): truncated — search() trails off into commented-out code and
# does not return a response within this view.
import os
import pdb
from django.shortcuts import render
from django.template import loader, Context
from django.http import HttpResponse
from django.template.context_processors import csrf
from .forms import DateForm
from TwitterAPI import TwitterAPI, TwitterRestPager
from datetime import datetime

api = TwitterAPI(os.environ['consumer_key'], os.environ['consumer_secret'],
                 os.environ['access_token_key'],
                 os.environ['access_token_secret'])


# Create your views here.
def index(request):
    # Landing page with the search form.
    return render(request, 'index.html', {})


def search(request):
    # form = DateForm(request.GET)
    # Pull the query and date (m/d/Y) from the GET params and normalise the
    # date into Twitter's Y-m-d format.
    query = request.GET['q']
    date = request.GET['date']
    formatted_date = datetime.strptime(date, '%m/%d/%Y').strftime('%Y-%m-%d')
    # json_string = api.request('search/tweets', {'q': '%23' + query + ' since:2016-05-11'})
    # decoded_json = json_string.json()
def _pairwise_tweet_sim(tweets):
    """Average pairwise similarity of a list of tweet texts.

    Returns a 3-element list; [0, 0, 0] when there are fewer than 2 tweets.
    Uses the externally defined calc_sim() for each pair.
    """
    if len(tweets) < 2:
        return [0, 0, 0]
    sim = np.array([0, 0, 0])
    for i in range(len(tweets) - 1):
        for j in range(i + 1, len(tweets)):
            sim = sim + np.array(calc_sim(tweets[i], tweets[j]))
    # NOTE(review): divisor is (n+1)*n/2, not the pair count n*(n-1)/2 —
    # preserved as-is from the original; confirm whether it was intentional.
    sim = sim / ((len(tweets) + 1) * len(tweets) / 2)
    return list(sim)


def main():
    """Refresh bot/human account data from Twitter into MongoDB and compute
    per-user tweet-similarity statistics.

    Reads credentials from twitter.cfg, copies users from the 'old_data'
    database into 'new_data' (marking vanished accounts inactive), fetches
    missing timelines, then stores a 3-element 'tweets_sim' vector per user.
    """
    config = configparser.ConfigParser()
    config.read("twitter.cfg")
    consumer_key = config.get('twitter', 'consumer_key')
    consumer_secret = config.get('twitter', 'consumer_secret')
    access_token = config.get('twitter', 'access_token')
    access_token_secret = config.get('twitter', 'access_token_secret')
    api = TwitterAPI(consumer_key, consumer_secret, access_token,
                     access_token_secret)
    mClient = MongoClient()
    new_data = mClient['new_data']
    old_data = mClient['old_data']
    print("Twitter API connection and Mongodb connection ready")

    for collection_name in ['bots', 'humans']:
        users_old = old_data[collection_name]
        all_users = read_users(users_old)
        active_users_new = get_users(all_users, api)
        # Users we knew about but the API no longer returns.  The id set is
        # built once (the original rebuilt the id list per membership test).
        active_ids = {u['id'] for u in active_users_new}
        inactive_users = [r for r in all_users if r not in active_ids]
        print("Active %s fetched with Twitter API" % collection_name)
        users_new = new_data[collection_name]
        for user in active_users_new:
            users_new.update({'id': user['id']}, {'$set': user})
        # BUG FIX: the original iterated over the undefined name
        # 'inactive_bots', raising NameError — 'inactive_users' was meant.
        for user_id in inactive_users:
            users_new.update({'id': user_id}, {'$set': {'active': False}})
        print("Basic information of %s written to database" % collection_name)

    # Fetch the timeline of every unprotected user that lacks one.
    for collection_name in ['bots', 'humans', 'new_users']:
        while True:
            user = new_data[collection_name].find_one(
                {
                    'protected': False,
                    'timeline': {'$exists': 0}
                }, {'id': 1})
            if user is None:
                break
            get_timeline(user['id'], api, new_data[collection_name])
        print("Info about timeline of each %s written to database" %
              collection_name)

    # Similarity over original tweets in the new schema (explicit flags).
    for collection_name in ['bots', 'humans', 'new_users']:
        while True:
            user = new_data[collection_name].find_one(
                {
                    'tweets_sim': {'$exists': 0},
                    'timeline': {'$exists': 1}
                }, {
                    'id': 1,
                    'timeline': 1
                })
            if user is None:
                break
            tweets = [
                r['text'] for r in user['timeline']
                if r['is_reply'] == False and r['is_rt'] == False
            ]
            new_data[collection_name].update_one(
                {'id': user['id']},
                {'$set': {'tweets_sim': _pairwise_tweet_sim(tweets)}})

    # Same computation for the old schema, which lacks is_reply/is_rt flags:
    # retweets are detected by the leading 'RT' in the text.
    for collection_name in ['bots', 'humans']:
        while True:
            user = old_data[collection_name].find_one(
                {
                    'tweets_sim': {'$exists': 0},
                    'timeline': {'$exists': 1}
                }, {
                    'id': 1,
                    'timeline': 1
                })
            if user is None:
                break
            tweets = [
                r['text'] for r in user['timeline'] if r['text'][:2] != 'RT'
            ]
            old_data[collection_name].update_one(
                {'id': user['id']},
                {'$set': {'tweets_sim': _pairwise_tweet_sim(tweets)}})

    mClient.close()
# Motion-triggered webcam tweeter: on each PIR trigger, grab a webcam frame
# with fswebcam and tweet it with the #pyTweetCMR hashtag.
import os
import time
from TwitterAPI import TwitterAPI
from gpiozero import MotionSensor

pir = MotionSensor(4)
b = 1  # sequence number used to name the captured images

# NOTE(review): fill in real credentials (ideally from the environment).
CONSUMER_KEY = ''
CONSUMER_SECRET = ''
ACCESS_TOKEN_KEY = ''
ACCESS_TOKEN_SECRET = ''
api = TwitterAPI(CONSUMER_KEY, CONSUMER_SECRET, ACCESS_TOKEN_KEY,
                 ACCESS_TOKEN_SECRET)

while True:
    if pir.motion_detected:
        img = "/home/pi/cam/" + str(b) + ".jpg"
        cmd = "fswebcam -F 5 --fps 20 -r \"1200x800\" " + img
        os.system(cmd)
        print("pic taken")
        # Read the capture; the with-block closes the handle (the original
        # never closed it and shadowed the builtin name 'file', leaking one
        # file descriptor per captured frame).
        with open(img, 'rb') as capture:
            data = capture.read()
        r = api.request('statuses/update_with_media',
                        {'status': '#pyTweetCMR'}, {'media[]': data})
        print(r.status_code)
        b = b + 1
ACCESS_TOKEN_SECRET = config.get(CURRENCY, 'access_token_secret') # Connect to Nano node rpc = nano.rpc.Client(NODE_IP) # Connect to Telegram if TELEGRAM_KEY != 'none': telegram_bot = telegram.Bot(token=TELEGRAM_KEY) # Connect to Twitter auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET) auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET) api = tweepy.API(auth) # Secondary API for non-tweepy supported requests twitterAPI = TwitterAPI(CONSUMER_KEY, CONSUMER_SECRET, ACCESS_TOKEN, ACCESS_TOKEN_SECRET) def validate_checksum_xrb(address: str) -> bool: """Given an xrb/nano/ban address validate the checksum""" if (address[:5] == 'nano_' and len(address) == 65) or (address[:4] in ['ban_', 'xrb_'] and len(address) == 64): # Populate 32-char account index account_map = "13456789abcdefghijkmnopqrstuwxyz" account_lookup = {} for i in range(0, 32): account_lookup[account_map[i]] = BitArray(uint=i, length=5) # Extract key from address (everything after prefix) acrop_key = address[4:-8] if address[:5] != 'nano_' else address[5:-8] # Extract checksum from address acrop_check = address[-8:]
# -*- coding:utf-8 -*- from TwitterAPI import TwitterAPI, TwitterPager SCREEN_NAME = 'tianhel1' SEARCH_TERM = '#Trump' TWEET_ID = '1311633424428404736' consumer_key = "" consumer_secret = "" api = TwitterAPI(consumer_key, consumer_secret, auth_type='oAuth2') #This shows all the information of a certain user r = api.request('users/lookup', {'screen_name': SCREEN_NAME}) print(r.json() if r.status_code == 200 else 'PROBLEM: ' + r.text) #get tweet by ID r = api.request('statuses/show/:' + TWEET_ID) tweet = r.json() print(tweet['user']['screen_name'] + ':' + tweet['text'] if r.status_code == 200 else 'PROBLEM: ' + r.text) #Search 10 tweets about Trump r = api.request('search/tweets', {'q': SEARCH_TERM, 'count': 10}) for item in r: print(item['text'].encode('gbk', 'backslashreplace'). decode('gbk', 'backslashreplace') if 'text' in item else item)