def search():
    """Collect up to 300 English tweets containing a space (i.e. nearly any tweet).

    Returns a list of tweet dicts. On a TwitterSearchException the error
    is printed and the (possibly empty) list gathered so far is returned,
    instead of the implicit None the old code produced.
    """
    tw = []
    try:
        tso = TwitterSearchOrder()       # create a TwitterSearchOrder object
        tso.set_keywords([" "])          # a space matches essentially every tweet
        tso.set_language("en")           # English tweets only
        tso.set_include_entities(False)  # skip entity metadata
        # NOTE(review): credentials are hard-coded; move them to config.
        ts = TwitterSearch(
            consumer_key="zg9yQTGTT2oizk3XLMHGLzfpJ",
            consumer_secret="nmiwqRpWDX0oxTCUTro8sPeUVUXIZHW9O1VZcTb0mLyfHw51sc",
            access_token="700001043-oxm3LZ72y4WmWGRqY66QjV0SzZoHGy5OGgwic26M",
            access_token_secret="hGJZWTb5bjGFSiuIQrff5UajKdlyXcp7Lyun5SJzq05Su",
        )
        for tweet in ts.search_tweets_iterable(tso):
            tw.append(tweet)
            # Fix: the old counter broke only after appending the 301st tweet.
            if len(tw) >= 300:
                break
    except TwitterSearchException as e:  # take care of all those ugly errors
        print(e)
    return tw
def recent_tweets(term, amt):
    """Scan up to ``amt`` English tweets matching ``term``.

    Returns a tuple (tweet_count, at_count, hash_count): the number of
    tweets inspected and the total '@' and '#' characters seen in their
    text. On a TwitterSearchException prints a message and returns None
    (unchanged behaviour).
    """
    try:
        tso = TwitterSearchOrder()     # create a TwitterSearchOrder object
        tso.setKeywords(term)          # words to search for
        tso.setLanguage('en')          # English tweets only
        tso.setCount(7)                # 7 results per page
        tso.setIncludeEntities(False)  # skip entity metadata
        # NOTE(review): credentials are hard-coded; move them to config.
        ts = TwitterSearch(
            consumer_key='anOyC9WPt8qP82BkKGt34A',
            consumer_secret='FzAFLwXEunP34fwu3VItB3zr1P8MTOg4URuNVEI1U',
            access_token='307461472-FZDgkwOuqLnKXYUtUaJzyJYZpFp1Nhy4IrlBURz1',
            access_token_secret='hoiFrBIe85VbtyMbYcxrXjbFhqUF4a6Qjolw5qbKXc'
        )
        tweet_count = 0
        at_count = 0
        hash_count = 0
        for tweet in ts.searchTweetsIterable(tso):
            for char in tweet['text']:
                if char == "@":
                    at_count += 1
                if char == "#":
                    hash_count += 1
            tweet_count += 1
            if tweet_count >= amt:
                break
        return tweet_count, at_count, hash_count
    except TwitterSearchException as e:
        # Fix: was a Python 2 print statement; use the function form.
        print("Over-exerting Twittter!! Come back in a few, you bad, bad warrior.")
def getTweets(username):
    """Return up to 300 ASCII-sanitised tweet texts mentioning ``username``,
    restricted to English tweets older than two days; [] on API error."""
    tFeeds = []
    try:
        order = TwitterSearchOrder()
        order.set_keywords([username])
        order.set_language('en')
        order.set_count(50)
        order.set_include_entities(False)
        order.set_until(date.today() - timedelta(days=2))
        client = TwitterSearch(
            consumer_key='%%%',
            consumer_secret='^^^',
            access_token='&&&',
            access_token_secret='@@@'
        )
        for position, tweet in enumerate(client.search_tweets_iterable(order)):
            if position == 300:
                break
            tFeeds.append(str(tweet['text'].encode('ascii', 'ignore')))
    except TwitterSearchException as e:  # catch all those ugly errors
        print(e)
    return tFeeds
def get_tweets(hashtag="maker"):
    """Fetch up to 10 tweets for ``hashtag`` and return their text in a list.

    Generalized: the hashtag is now a parameter; the default "maker"
    preserves the old behaviour for existing callers. Returns [] when
    the Twitter API raises an error.
    """
    sources = []
    try:
        tso = TwitterSearchOrder()
        tso.setSearchURL("?q=%23" + hashtag)  # raw search URL for #hashtag
        tso.setLocale('en')
        tso.setCount(10)
        tso.setIncludeEntities(False)
        twitter_search = TwitterSearch(
            consumer_key=Secrets.consumer_key,
            consumer_secret=Secrets.consumer_secret,
            access_token=Secrets.access_token,
            access_token_secret=Secrets.access_token_secret
        )
        tweets = twitter_search.searchTweets(tso)
        for tweet in tweets['content']['statuses']:
            sources.append(tweet['text'])
    except TwitterSearchException as exception:
        print(exception)
    return sources
def twit_search(keywords):
    """Search English tweets for ``keywords`` and return up to 6 of them as
    dicts with screen_name, text, full_name, url and created_at.
    Returns [] when the Twitter API raises an error."""
    try:
        order = TwitterSearchOrder()
        order.setKeywords(keywords)
        order.setLanguage('en')
        order.setCount(7)
        order.setIncludeEntities(False)
        client = TwitterSearch(
            consumer_key='asX13sgNL5fVbVfSwyaLCw',
            consumer_secret='Y0SkBfcxZ5Q4AVmmXEMCcWI5lfUD3JBdgtd1fioJwU',
            access_token='956472907-NGjoV82C6UwGu4xXLod1R3SKsWG9hfCXntt8Smxr',
            access_token_secret='98S3jvUx5TZQxHYfBcP971ow02mTzeyQUdILamHp3Oee1'
        )
        base_url = "https://twitter.com"
        collected = []
        for tweet in client.searchTweetsIterable(order):
            user = tweet['user']
            status_url = "/".join(
                [base_url, user['screen_name'], "status", tweet['id_str']])
            collected.append({
                "screen_name": user['screen_name'],
                "text": tweet['text'],
                "full_name": user['name'],
                "url": status_url,
                "created_at": tweet['created_at'],
            })
            if len(collected) >= 6:
                break
        return collected
    except TwitterSearchException as e:
        print(e)
        return []
def mainLoop():
    """Poll Twitter for English tweets matching the module-level keyword1
    and append any tweet not already present in the CSV log.

    Relies on module-level keyword1/keyword2, the API credential globals,
    and the isStringinCSV/writeToCSV helpers.
    """
    try:
        tso = TwitterSearchOrder()
        tso.set_keywords([keyword1])
        tso.set_language('en')
        tso.set_include_entities(False)
        ts = TwitterSearch(
            consumer_key=consumerKey,
            consumer_secret=consumerSecret,
            access_token=accessToken,
            access_token_secret=accessSecret
        )
        # Fix: were Python 2 print statements; use the function form.
        print("Checking for new tweets that match keywords: %s or %s" % (keyword1, keyword2))
        for tweet in ts.search_tweets_iterable(tso):
            # Pull out the fields we log, ASCII-sanitised.
            username = (tweet['user']['screen_name']).encode('ascii', 'replace')
            tweetText = (tweet['text']).encode('ascii', 'replace')
            date = (tweet['created_at']).encode('ascii', 'replace')
            # Fix: "== False" replaced with "not ..." (isStringinCSV is a membership test).
            if not isStringinCSV([username, tweetText, date]):
                print("New Tweet!")
                writeToCSV([username, tweetText, date])  # new tweet: write to log
        print("Check complete.")
    except TwitterSearchException as e:
        print(e)
def jobInteraction(tag):
    """Search Spanish-language tweets for the comma-separated keywords in
    ``tag`` and return them JSON-encoded.

    Prints the number of tweets collected (0 and an error message when
    the Twitter API raises).
    """
    tags = tag.split(",")
    tweets = []
    try:
        tso = TwitterSearchOrder()
        tso.set_keywords(tags)
        tso.set_language('es')          # Spanish tweets only
        tso.set_include_entities(True)  # keep entity metadata
        # Removed a commented-out, superseded credential set.
        ts = TwitterSearch(
            consumer_key='gDEFFAToqZ1j5cE9SgJkeqvBY',
            consumer_secret='jqKGAra9Kd0n4jwsQXkhairyxx0uv9D4iMme6AeE2NLDX3fPfz',
            access_token='17160146-FxfSx4Bdq7SvuENSgHvi175f7uyjwoHCHVMUYiJQP',
            access_token_secret='SREyq0DxHOurUY5E0AbT3kPDwl5IFDcPFmnehZjbaH5ab'
        )
        for tweet in ts.search_tweets_iterable(tso):
            tweets.append(tweet)
    except TwitterSearchException as e:
        print(e)
    # Fix: was a Python 2 print statement.
    print(len(tweets))
    return json.dumps(tweets)
def get(self, keyword=None, lang="en", max=20):
    """Print up to ``max`` tweets matching ``keyword`` in language ``lang``.

    ``max`` shadows the builtin but is kept for interface compatibility.
    Aborts with HTTP 500 on a TwitterSearchException; always returns
    False (unchanged behaviour).
    """
    if keyword:
        try:
            tso = TwitterSearchOrder()
            tso.set_keywords([keyword])
            tso.set_language(lang)
            tso.set_include_entities(False)
            ts = TwitterSearch(
                Config.settings['twitter']['consumer_key'],
                Config.settings['twitter']['consumer_secret'],
                Config.settings['twitter']['access_token'],
                Config.settings['twitter']['access_token_secret']
            )
            counter = 0
            # Guard against an explicit max=None; default is 20.
            sleep_at = max if max is not None else 20
            for tweet in ts.search_tweets_iterable(tso):
                # Fix: was a Python 2 print statement; removed unused sleep_for local.
                print(tweet)
                counter += 1
                if counter >= sleep_at:
                    counter = 0
                    break
        except TwitterSearchException as e:
            abort(500)
    return False
def search_results(words):
    """Print every tweet matching ``words`` that has more than 150 retweets."""
    try:
        order = TwitterSearchOrder()
        order.set_keywords(words)
        rilo_twitter = TwitterSearch(
            consumer_key=keys.consumer_key,
            consumer_secret=keys.consumer_secret,
            access_token=keys.access_token,
            access_token_secret=keys.access_token_secret
        )
        for tweet in rilo_twitter.search_tweets_iterable(order):
            if tweet['retweet_count'] > 150:
                print('@%s tweeted: %s' % (tweet['user']['screen_name'], tweet['text']))
                print('\n\n\n')
    except TwitterSearchException as e:
        print (e)
def get_twitters():
    """Search Twitter for the request's comma-separated 'hashtags' arg and
    return a JSON list of unique {text, profile_image_url, name,
    screen_name, location} dicts; [] JSON on error or empty input."""
    twitters = []
    raw = request.args.get('hashtags', False)
    tags = raw.replace('#', '%23').replace('@', '%40').replace(' ', '').split(',')
    try:
        if len(tags) >= 1:
            search_settings = TwitterSearchOrder()
            search_settings.set_include_entities(False)
            search_settings.set_keywords(tags)
            search_on_twitter = TwitterSearch(
                consumer_key=APP_KEY,
                consumer_secret=APP_SECRET,
                access_token=OAUTH_TOKEN,
                access_token_secret=OAUTH_TOKEN_SECRET
            )
            for twitter in search_on_twitter.search_tweets_iterable(search_settings):
                author = twitter['user']
                tw = {
                    'text': twitter['text'],
                    'profile_image_url': author['profile_image_url'],
                    'name': author['name'],
                    'screen_name': author['screen_name'],
                    'location': author['location'],
                }
                # Deduplicate identical entries before returning.
                if tw not in twitters:
                    twitters.append(tw)
            return json.dumps(twitters)
        return json.dumps([])
    except TwitterSearchException as e:
        return json.dumps([])
def search(self):
    """Search geocoded (Ireland-centred) English tweets for self.keywords
    and flush each one to the database via self.io.write_tweet."""
    try:
        tso = TwitterSearchOrder()
        tso.setKeywords(self.keywords)
        tso.setLanguage('en')
        tso.setCount(100)               # 100 results per page
        tso.setIncludeEntities(False)
        tso.setGeocode(53.3333328, -8.0, 300, True)  # centre of Ireland, 300-unit radius
        tso.url  # NOTE(review): result discarded — likely dead code; confirm before removing
        ts = TwitterSearch(
            consumer_key=self.config.get('twitter_keys', 'consumer_key'),
            consumer_secret=self.config.get('twitter_keys', 'consumer_secret'),
            access_token=self.config.get('twitter_keys', 'access_token'),
            access_token_secret=self.config.get('twitter_keys', 'access_token_secret')
        )
        count = 0
        for tweet in ts.searchTweetsIterable(tso):
            count += 1
            self.io.write_tweet(tweet)  # save to db
        # Fix: were Python 2 print statements; removed unused `done` local.
        print('Search complete.. flushed %d tweets into db.' % count)
    except TwitterSearchException as e:
        print('haha')
        print(e)
def fetch_tweets(search_request):
    """
    fetches tweets from Twitter API
    extracts urls and updates db

    Returns the extracted urls, or None when the Twitter API raises.
    """
    try:
        tso = TwitterSearchOrder()
        tso.setKeywords([search_request])        # define search request
        tso.setCount(settings.tweets_per_page)   # results per page
        tso.setIncludeEntities(True)             # entity info needed for url extraction
        ts = TwitterSearch(
            consumer_key=twitter.TWITTER_CONSUMER_KEY,
            consumer_secret=twitter.TWITTER_CONSUMER_SECRET,
            access_token=twitter.TWITTER_ACCESS_TOKEN,
            access_token_secret=twitter.TWITTER_ACCESS_TOKEN_SECRET
        )
        ts.authenticate()  # user must authenticate first
        tweets = ts.searchTweetsIterable(tso)
        found_urls = extract_urls(tweets)
        # Persist the result keyed by the search term.
        search_keyword_object = SearchKeyWord()
        search_keyword_object.gifs = found_urls
        search_keyword_object.search_keyword = search_request
        search_keyword_object.updated_at = datetime.now()
        print(search_keyword_object)
        search_keyword_object.save()
        return found_urls
    except TwitterSearchException as e:
        # Fix: was Python-2-only "except TwitterSearchException, e" syntax.
        message = e.message
def pull_tweet_responses(username, tweet_id):
    """
    Find English tweets mentioning ``username`` newer than ``tweet_id``,
    keep those that are direct replies to ``tweet_id`` and not retweets,
    and persist each one to MySQL.
    :param username:
    :param tweet_id:
    """
    try:
        order = TwitterSearchOrder()
        order.set_keywords(['@' + username])
        order.set_language('en')
        order.set_since_id(tweet_id)
        client = TwitterSearch(
            consumer_key=api_key,
            consumer_secret=api_secret,
            access_token=access_token_key,
            access_token_secret=access_token_secret
        )
        for tweet in client.search_tweets_iterable(order):
            is_reply = tweet['in_reply_to_status_id'] == tweet_id
            is_retweet = tweet['text'][:2] == 'RT'
            if is_reply and not is_retweet:
                write_response_to_mysql(tweet)
    except TwitterSearchException as e:
        print('\nTweet id: ' + str(tweet_id))
        print(e)
def get_new_tweets(self, keywords: list) -> None:
    '''
    Use the TwitterSearch lib to fetch tweets that match the given keywords.
    Pass tweets to the _store method to update the database.
    '''
    fetched = []
    if self.DEBUG:
        print("Searching for tweets with {} as keywords.".format(keywords))  # DEBUG
    try:
        order = TwitterSearchOrder()
        order.setKeywords(keywords)
        order.setLanguage('en')
        order.setCount(1)
        order.setIncludeEntities(False)
        client = TwitterSearch(
            consumer_key='YOUR STUFF HERE',
            consumer_secret='YOUR STUFF HERE',
            access_token='YOUR STUFF HERE',
            access_token_secret='YOUR STUFF HERE'
        )
        client.authenticate()
        for tweet in client.searchTweetsIterable(order):
            fetched.append(tweet)
    except TwitterSearchException as e:
        self.report_error(["TwitterSearchException", e])
    if self.DEBUG:
        print("Fetched {} new tweets with {} as keywords.".format(len(fetched), keywords))  # DEBUG
    self._store(fetched, keywords)
def twitterStreaming():
    """Poll Twitter for English 'Swissquote' tweets newer than a fixed id,
    print each one and push its text to serveurStreaming; on an API
    error the streaming connection is flagged dead."""
    from time import sleep
    sleep(5)
    try:
        order = TwitterSearchOrder()
        order.set_keywords(['Swissquote'])
        order.set_language('en')
        order.set_include_entities(False)
        lastID = 569803155141206016
        order.set_since_id(lastID)
        client = TwitterSearch(
            consumer_key='a',
            consumer_secret='a',
            access_token='a-a',
            access_token_secret='b'
        )
        for tweet in client.search_tweets_iterable(order):
            print('[%s]@%s tweeted: %s' % (tweet['created_at'], tweet['user']['screen_name'], tweet['text']))
            # Track the newest tweet id seen.
            if lastID < tweet['id']:
                lastID = tweet['id']
            serveurStreaming.send(tweet['text'])
    except TwitterSearchException as e:
        print(e)
        serveurStreaming.isConnectionAlive = False
def perform_search(request):
    """Create a page that counts hashtags"""
    tag_to_search = ""
    if request.method == "POST":
        tag_to_search = request.POST["search"]
    keyword = '"#' + tag_to_search + '"'
    users = []
    postCount = 0
    hashCount = Counter()
    uniqueHashCount = Counter()
    # Talk to Twitter; keys/secrets come from Twitter and must stay private.
    try:
        order = TwitterSearchOrder()
        order.set_keywords([keyword])     # the value we search for
        order.set_include_entities(True)  # entity info carries the hashtags
        twitter_client = TwitterSearch(consumer_key="xxx",
                                       consumer_secret="yyy",
                                       access_token="qqq",
                                       access_token_secret="rrr")
        for tweet in twitter_client.search_tweets_iterable(order):
            postCount += 1              # count each tweet
            add_tweet(tweet, users)     # organise/record tweets for later access
        # Tally the hashtags, then persist everything.
        count_hashtags(hashCount, uniqueHashCount, users)
        new_id = save_data(keyword.upper(), hashCount, uniqueHashCount,
                           postCount, len(users))
    except TwitterSearchException as e:
        return str(e)
    return search(request, new_id)
def printUser(username):
    """Print the UTF-8-encoded text of up to ~51 tweets from ``username``'s
    timeline, sleeping 60s after every 5th API query to respect rate limits.

    Returns 1 on success, 0 on a TwitterSearchException.
    """
    try:
        tuo = TwitterUserOrder(username)  # create a TwitterUserOrder
        ts = TwitterSearch(
            consumer_key='1kj4GBRevJITV4S40kLXGHVG2',
            consumer_secret='c80dJF41IwQV2G4ynR8VYblMQU15M4bc8OFg3aG6l8Y0aoSFhU',
            access_token='1708110452-e3unR8gR7WRMGDoCh3aZutMPL3bFBLFlqHz8tzy',
            access_token_secret='kkiZDDp8KXLB8cRDwsMqBDc5IxqiaVXSmbQ2XtZEij0tl'
        )

        def my_callback_closure(current_ts_instance):
            # Rate-limit guard: pause a minute after every 5th query.
            queries, tweets_seen = current_ts_instance.get_statistics()
            if queries > 0 and (queries % 5) == 0:
                time.sleep(60)

        i = 0
        for tweet in ts.search_tweets_iterable(tuo, callback=my_callback_closure):
            if i > 50:
                break
            content = tweet['text'].encode('utf-8')
            # Fix: was a Python 2 print statement.
            print(content)
            i += 1
        return 1
    except TwitterSearchException as e:  # catch all those ugly errors
        print(e)
        return 0
def search_twitter(credentials, keywords):
    """Performs a search against the twitter search API.

    @param dict credentials The auth credentials
    @param list keywords The list of keywords to search
    @returns TwitterSearch.searchTweetsIterable() The search results
    """
    tso = TwitterSearchOrder()      # create a TwitterSearchOrder object
    tso.setKeywords(keywords)       # words to search for
    tso.setCount(100)               # 100 results per page
    tso.setIncludeEntities(False)   # skip entity metadata
    # Fix: was a Python 2 print statement; docstring typo "nkeywords" fixed.
    print(tso.createSearchURL())
    ts = TwitterSearch(
        consumer_key=credentials["tw_consumer_key"],
        consumer_secret=credentials["tw_consumer_secret"],
        access_token=credentials["tw_access_key"],
        access_token_secret=credentials["tw_access_secret"]
    )
    return ts.searchTweetsIterable(tso)
def search(query='cheeky nandos ledge banter', max=5):
    """Return the ids of up to ``max`` tweets matching the words of ``query``.

    ``max`` shadows the builtin but is kept for interface compatibility.
    Fix: a non-positive ``max`` now yields [] immediately — the old
    post-decrement test (`max -= 1; if not max`) never fired for 0 or
    negative values and iterated the entire result set.
    Returns None when the Twitter API raises (unchanged).
    """
    keywords = query.split()
    try:
        tso = TwitterSearchOrder()
        tso.set_keywords(keywords)
        ts = TwitterSearch(
            consumer_key=app.config['TWITTER_CONSUMER_KEY'],
            consumer_secret=app.config['TWITTER_CONSUMER_SECRET'],
            access_token=app.config['TWITTER_ACCESS_TOKEN'],
            access_token_secret=app.config['TWITTER_TOKEN_SECRET']
        )
        results = []
        for tweet in ts.search_tweets_iterable(tso):
            if max <= 0:
                break
            results.append(tweet['id'])
            max -= 1
        return results
    except TwitterSearchException as e:
        print(e)
def crawl(filename, keywords, language):
    """Append each tweet matching ``keywords`` in ``language`` to
    ``filename`` as a tab-separated line: two fresh uuids, a random
    label, and the newline-stripped tweet text."""
    f = codecs.open(filename, "a", "utf-8")
    try:
        order = TwitterSearchOrder()
        order.set_keywords(keywords)
        order.set_language(language)
        order.set_include_entities(False)
        client = TwitterSearch(
            consumer_key='MozbqzFag8UQMbuw9qkuyG7Fm',
            consumer_secret='c4m8EKOwQb90A3nLLySKSEkV7fVXe8taZq4IjgDrMVKihbNW4s',
            access_token='2684788650-VOzUZGhPItlgye6w5LhX5QMevWLK8WTALcxe8KM',
            access_token_secret='9IeW0F8XFnZ7FV5sCyZIahLEZBQTkzwO4L0q3vqRkl4je'
        )
        for tweet in client.search_tweets_iterable(order):
            flat_text = tweet['text'].replace("\n", " ")
            first_id = uuid.uuid4()
            second_id = uuid.uuid4()
            f.write('@%s\t%s\t%s\t%s\n'
                    % (first_id, second_id, random_label(), flat_text))
    except TwitterSearchException as e:
        print(e)
    f.close()
def serve_twitter_news(self):
    """Collect a batch of 5 English tweets about the module-level ``topic``
    and push them to the client as JSON; API errors are silently ignored."""
    try:
        order = TwitterSearchOrder()
        order.set_keywords([topic])
        order.set_language('en')
        order.set_include_entities(False)
        client = TwitterSearch(
            consumer_key=my_consumer_key,
            consumer_secret=my_consumer_secret,
            access_token=my_access_token,
            access_token_secret=my_access_token_secret
        )
        batch_size = 5
        updates = []
        counter = 0
        for tweet in client.search_tweets_iterable(order):
            update = '@%s: %s' % (tweet['user']['screen_name'].encode('utf-8').strip(),
                                  tweet['text'].encode('utf-8').strip())
            updates.append(update)
            logging.debug(update)
            counter += 1
            # Send one batch, then stop.
            if counter >= batch_size:
                self.send_JSON({'update': updates})
                break
    except TwitterSearchException as e:
        pass
def retrieveTweets(keyword):
    """Return an HTML snippet containing up to 2 English tweets matching
    ``keyword``.

    Fix: ``htmlstring`` is initialised before the try block so the final
    ``return`` can no longer raise NameError when the API fails before
    the first assignment.
    """
    htmlstring = ""
    try:
        tso = TwitterSearchOrder()
        tso.set_keywords([keyword])
        tso.set_language('en')
        tso.set_include_entities(False)
        ts = TwitterSearch(
            consumer_key='dxDoYB875ZUsvgPtp8EVDkyq6',
            consumer_secret='6v4GiG1B3zKmJOsYPEtb0b39lv9da7iu7pIdAANyIoisoNrtZY',
            access_token='2157789854-Fwr0uDJQ23twqSyxPEH0VnPwafQvpay8K2z7aFQ',
            access_token_secret='q9S6ECBpBv1RMBG8iNT8cYdoJvQAoIMZfMHAivs5Fh0PQ')
        # Fix: was a Python 2 print statement.
        print("lolpls")
        i = 0
        for tweet in ts.search_tweets_iterable(tso):
            htmlstring += "<div><strong><a href='http://twitter.com/%s'>@%s</a></strong> %s" % (
                tweet['user']['screen_name'], tweet['user']['screen_name'], tweet['text']) + '</div>'
            i += 1
            if i > 1:
                break
    except TwitterSearchException as e:
        print(e)
    return htmlstring
def search(kw):
    """Flask endpoint: return cached tweets for keyword ``kw`` sliced by the
    'begin'/'end' request args, refreshing the cache from Twitter when
    begin == 0. Returns {"error": 1} JSON on a Twitter API failure."""
    begin = int(request.args['begin'])
    end = int(request.args['end'])
    if begin == 0:
        try:
            order = TwitterSearchOrder()
            order.set_language('en')
            order.set_keywords([kw])
            order.set_include_entities(False)
            client = TwitterSearch(
                consumer_key=Tconsumer_key,
                consumer_secret=Tconsumer_secret,
                access_token=Taccess_token,
                access_token_secret=Taccess_token_secret
            )
            client.search_tweets(order)
            cached = Tweets.get_one(keyword=kw)
            if cached is None:
                fresh = Tweets(keyword=kw, tw=client.get_tweets()['statuses'])
                fresh.save()
            else:
                cached.tw = client.get_tweets()['statuses']
                cached.save()
        except TwitterSearchException as e:
            print(e)
            return jsonify(error=1)
    tweets = Tweets.get_one(keyword=kw).tw
    return jsonify(tweets=tweets[begin:end])
def city_tweet():
    """Scan up to 11 English retweets and count '@' and '#' characters.

    Returns (tweet_count, at_count, hash_count), or None on a
    TwitterSearchException. NOTE(review): the original geocode filter is
    commented out upstream — "can't seem to get setGeocode to work via API".
    """
    try:
        tso = TwitterSearchOrder()
        tso.addKeyword("RT")           # retweets all contain "RT"
        tso.setLanguage("en")
        tso.setCount(7)                # 7 results per page
        tso.setIncludeEntities(False)
        ts = TwitterSearch(
            consumer_key="anOyC9WPt8qP82BkKGt34A",
            consumer_secret="FzAFLwXEunP34fwu3VItB3zr1P8MTOg4URuNVEI1U",
            access_token="307461472-FZDgkwOuqLnKXYUtUaJzyJYZpFp1Nhy4IrlBURz1",
            access_token_secret="hoiFrBIe85VbtyMbYcxrXjbFhqUF4a6Qjolw5qbKXc",
        )
        tweet_count = 0
        at_count = 0
        hash_count = 0
        for tweet in ts.searchTweetsIterable(tso):
            for char in tweet["text"]:
                if char == "@":
                    at_count += 1
                if char == "#":
                    hash_count += 1
            tweet_count += 1
            if tweet_count > 10:
                break
        # Fix: was a Python 2 print statement.
        print(tweet_count, at_count, hash_count)
        return tweet_count, at_count, hash_count
    except TwitterSearchException as e:
        print(e)
def search(text, limit):
    """Return the text of up to ``limit`` English tweets matching ``text``.

    Also prints how many tweets were iterated. Returns the (possibly
    empty) list even when the Twitter API raises.
    """
    tweets_list = []
    try:
        tso = TwitterSearchOrder()
        tso.set_keywords(text)
        tso.set_language('en')
        tso.set_include_entities(False)
        ts = TwitterSearch(
            consumer_key='tbHIo3PImh0pSIETLlO8wIKj4',
            consumer_secret='QmzJYSAp9rw6O7tDJATkm7Avq0OBRTfZbdNf3BjEmDmdDB1jT2',
            access_token='1315897358-IkDrUD4Zdy6HP3FjF4UxdBqICEZOU91Lys95FGu',
            access_token_secret='nHROttog8743ZmeBWeldvh24EHwXtW4h1Z69o1GsgV2zE'
        )
        cnt = 0
        for tweet in ts.search_tweets_iterable(tso):
            cnt += 1
            if cnt > limit:
                break
            tweets_list.append(tweet['text'])
        # Fix: was a Python 2 print statement.
        print(cnt, 'tweets')
    except TwitterSearchException as e:
        print(e)
    return tweets_list
def __init__(self, consumer_key, consumer_secret, access_token, access_token_secret, keyword, since_id, tweet_count=15):
    """Run a Japanese-language search for ``keyword`` (optionally only
    tweets newer than ``since_id``) and cache the result dicts in
    self._results."""
    self._results = []
    self._i = 0
    print("since_id: {0}".format(since_id))
    try:
        order = TwitterSearchOrder()
        order.set_keywords([keyword])
        order.set_language('ja')
        order.set_include_entities(False)
        order.set_count(tweet_count)
        if since_id > 0:
            # NOTE: long() is Python 2 only — this block targets Python 2.
            order.set_since_id(long(since_id))
        client = TwitterSearch(
            consumer_key=consumer_key,
            consumer_secret=consumer_secret,
            access_token=access_token,
            access_token_secret=access_token_secret
        )
        for tweet in client.search_tweets_iterable(order):
            author = tweet['user']
            self._results.append({
                'screen_name': author['screen_name'],
                'user_name': author['name'],
                'profile_image_url': author['profile_image_url'],
                'text': tweet['text'],
                'created_at': tweet['created_at'],
                'id': tweet['id'],
            })
    except TwitterSearchException as e:
        print(e)
def userSearch(user):
    """Return the raw TwitterSearch response for ``user``'s timeline."""
    client = TwitterSearch(
        consumer_key='UIBl6otwQD9CtbhRQSQ2GlV8H',
        consumer_secret='MlxVNNZDWfEDBpOTbZwOAPQ8BziP3tcQwMoU3vXdxllzsdgjLu',
        access_token='85289745-4PknFj4zSUPd12rbIg8ZkPnAAewZCEmwXj3wyNbiO',
        access_token_secret='A0RNhwgoVh0okZQoL5w6UydpplyTSft1Sx6QCZ4TtvaAC'
    )
    timeline_order = TwitterUserOrder(user)  # order for the user's timeline
    return client.search_tweets(timeline_order)
def get_tweet(token_num, user, clan):
    """Insert every tweet from ``user``'s timeline into the tweets table,
    stamped with the current time and ``clan``; duplicates are skipped
    via INSERT IGNORE. Credentials come from access_tokens[token_num]."""
    date = time.strftime('%Y-%m-%d %H:%M:%S')
    creds = access_tokens[token_num]
    client = TwitterSearch(
        consumer_key=creds['consumer_key'],
        consumer_secret=creds['consumer_secret'],
        access_token=creds['access_token'],
        access_token_secret=creds['access_token_secret'],
    )
    timeline = TwitterUserOrder(user)
    for tweet in client.search_tweets_iterable(timeline):
        cursor.execute(
            'INSERT IGNORE INTO tweets (date, name, clan, tweet) VALUES (%s, %s, %s, %s)',
            (date, user, clan, json.dumps(tweet),))
def execute_twitter():
    """Crawl German 'Tourismus' tweets and persist new ones to both the
    MySQL ``webcrawler`` table and the ``ktidashboard.crawler`` Mongo
    collection, deduplicating on a content/time identifier.

    Relies on a module-level MySQL ``conn``, ``MongoClient`` and the
    ``getIdentifier`` helper.
    """
    try:
        tso = TwitterSearchOrder()       # create a TwitterSearchOrder object
        tso.setKeywords(['Tourismus'])   # search term
        tso.setLanguage('de')            # German tweets only
        tso.setCount(2)                  # results per page
        tso.setIncludeEntities(False)    # skip entity metadata
        # Set up the mysql connection (force utf8 on both ends)
        conn.set_character_set('utf8')
        cursor = conn.cursor()
        cursor.execute('SET NAMES utf8;')
        cursor.execute('SET CHARACTER SET utf8;')
        cursor.execute('SET character_set_connection=utf8;')
        # Set up the mongo connection
        client = MongoClient()
        db = client.ktidashboard
        items = db.crawler
        # Already-seen identifiers; note the field is (mis)spelled 'identifer'.
        stored = items.distinct('identifer')
        # it's about time to create a TwitterSearch object with our secret tokens
        ts = TwitterSearch(
            consumer_key = 'Z3rU3WMQnvg5xUsox7Rfg',
            consumer_secret = 'yQGMdqA9M25V5g2tsmg6GPdZuR9dr73chErWBx94Jk',
            access_token = '216641262-67CaLeYKWOPz54qObHHe9UESTlncdMyEP4zhl0bI',
            access_token_secret = 'H827lhCwUQka9TiMG7IwSAJkHVBC3f9DG78sy8uNA'
        )
        for tweet in ts.searchTweetsIterable(tso):
            # Best-effort: extract the first http(s) url from the tweet text.
            url = ''
            try:
                url = re.search("(?P<url>https?://[^\s]+)", tweet['text']).group("url")
            except:
                pass
            # Reformat Twitter's created_at: drop the weekday prefix and the
            # timezone offset, then reparse to 'YYYY-MM-DD HH:MM:SS'.
            # NOTE(review): local 'time' shadows the time module inside this loop.
            time = tweet['created_at']
            time = tweet['created_at'][4:]
            time = time[:15] + time[-5:]
            fmt = "%b %d %H:%M:%S %Y"
            time = datetime.strptime(time, fmt).strftime('%Y-%m-%d %H:%M:%S')
            crawltime = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            indeti = getIdentifier(tweet['text'], time)
            if (indeti not in stored):
                # New item: mirror it into MySQL ...
                cursor.execute("""INSERT INTO webcrawler (url,site,author,texts,time,crawltime,source,location) VALUES (%s, %s, %s, %s,%s, %s, %s,%s)""" , (url,'Twitter',tweet['user']['screen_name'].encode('utf-8'),tweet['text'].encode('utf-8'),time,crawltime,'tweet','world'))
                conn.commit()
                # ... and into Mongo; 'tweet' is deliberately rebound to the document.
                tweet = {"source": "tweet", "author":tweet['user'],"site":"Twitter","texts": tweet['text'],"location":"welt", "time": time, "crawltime":crawltime, "identifer":indeti }
                items.insert(tweet)
                stored.append(indeti)
    except TwitterSearchException as e:  # take care of all those ugly errors if there are some
        print(e)
def getTwitterMentions(self):
    """Search recent tweets for self.twitterSearchTerm (newer than the
    persisted last_id cursor), record each tweet against every url it
    links to in the url database, and advance last_id.

    Relies on self.conf (API credentials), self.status (cursor state)
    and the expandUrl/readUrlDb/writeUrlDb/cacheImage/
    touchBucket3PostByURL/writeInfoDb helpers. NOTE: uses Python 2
    print statements throughout.
    """
    try:
        print 'last id = %s' % self.status['twitter']['last_id']
        tso = TwitterSearchOrder()
        tso.setKeywords([self.twitterSearchTerm])
        tso.setCount(100)             # max results per page
        tso.setIncludeEntities(True)  # needed for entities.urls below
        tso.setResultType('recent')
        # Only fetch tweets newer than the last one processed.
        if self.status['twitter']['last_id']:
            tso.setSinceID(self.status['twitter']['last_id'])
        ts = TwitterSearch(
            consumer_key=self.conf['twitter_app']['consumer_key'],
            consumer_secret=self.conf['twitter_app']['consumer_secret'],
            access_token=self.conf['twitter_app']['access_token'],
            access_token_secret=self.conf['twitter_app']['access_token_secret']
        )
        i = 1
        # this is where the fun actually starts
        for tweet in ts.searchTweetsIterable(tso):
            print "#%s. #%s" % ( i , tweet['id'] )
            print '@%s tweeted: %s' % ( tweet['user']['screen_name'], tweet['text'] )
            # Track the highest tweet id seen as the new cursor.
            if self.status['twitter']['last_id'] < tweet['id']:
                self.status['twitter']['last_id'] = tweet['id']
            # Attach this tweet's details to every url it mentions.
            for u in tweet['entities']['urls']:
                x = self.expandUrl(u['expanded_url'])
                print x
                data = self.readUrlDb(x)
                data['tweets'][tweet['id']] = {
                    'id': tweet['id'],
                    'user_screen_name': tweet['user']['screen_name'],
                    'user_name': tweet['user']['name'],
                    'user_id': tweet['user']['id'],
                    'profile_image_url': tweet['user']['profile_image_url'],
                    'profile_image_local': self.cacheImage(tweet['user']['profile_image_url']),
                    'text': tweet['text'],
                    'created_at': tweet['created_at'],
                }
                self.writeUrlDb(data)
                self.touchBucket3PostByURL(x)
            i += 1
    # take care of all those ugly errors if there are some
    except TwitterSearchException as e:
        print(e)
    self.writeInfoDb()
def getVictims():
    """Sweep a SearchFrame grid of geocoded Twitter searches for
    storm/disaster keywords, pass the collected tweets through azureApi,
    and return a list of VictimInfo objects.

    NOTE(review): the buckets are built as [y][x] but indexed [i][j]
    with i ranging over x — this only lines up when frame.x == frame.y;
    confirm. Also the tweet itself is never passed to VictimInfo — each
    entry only carries the cell's GeoPosition; confirm intended.
    """
    frame = SearchFrame()
    # One tweet bucket per grid cell.
    geolocations = [[[] for i in range(0, frame.x)] for j in range(0, frame.y)]
    for i in range(0, frame.x):
        for j in range(0, frame.y):
            tso = TwitterSearchOrder()
            # OR-match any disaster-related keyword (substrings like
            # "injur" catch injured/injury etc.).
            tso.set_keywords([
                "tornado", "EF", "hail", "storm", "damage", "injur", "kill",
                "wind", "thunder", "water", "flood", "rain", "wound", "loss",
                "help", "save"
            ], or_operator=True)
            # Centre each search on this grid cell.
            # NOTE(review): milesToLat is applied to both axes — the
            # longitude offset presumably needs its own conversion; confirm.
            tso.set_geocode(frame.longitude + milesToLat(i * frame.diameter),
                            frame.latitude + milesToLat(j * frame.diameter),
                            int(frame.diameter / 2))
            searchResults = TwitterSearch(
                consumer_key="eoe5TmgDTsmI0E2NpSrt7KPfg",
                consumer_secret=
                "onNO7vobeUw1piKWfmXQJwmpMKrQbzRWTeXFjKo6nZwipCPJ3r",
                access_token=
                "2491643690-UVYiYfoc3VXyQ4dc5B2VcSCMTU0hsl0www8dB7n",
                access_token_secret=
                "g8RmQzuyIA0vgxfCgTcMsSshbWYGmhmKfIO3320TQ6isY")
            for tweet in searchResults.search_tweets_iterable(tso):
                geolocations[i][j].append(tweet)
    # Post-process the whole grid of tweets via the Azure API wrapper.
    geolocations = azureApi(geolocations)
    returnlist = []
    for i in range(0, frame.x):
        for j in range(0, frame.y):
            for tweet in geolocations[i][j]:
                returnlist.append(
                    VictimInfo(
                        GeoPosition(frame.longitude + (i * frame.diameter),
                                    frame.latitude + (j * frame.diameter),
                                    int(frame.diameter / 2))))
    return returnlist
def twitter(phrase):
    """Count English tweets matching *phrase*.

    The phrase is split on single spaces and used as AND-combined keywords.

    :param phrase: space-separated search terms
    :return: the number of matching tweets, or 0 on a search error
    """
    print("twitter", phrase)
    try:
        tso = TwitterSearchOrder()
        tso.set_keywords(phrase.split(' '))
        tso.set_language('en')
        tso.set_include_entities(False)
        ts = TwitterSearch(
            consumer_key='5fKb0oXnn188AbpHDckeAITgi',
            consumer_secret=
            'KnMB211TgjmDC36oZihONayTHeIIftidwJc3u5Qgmu9PYWuHzi',
            access_token=
            '30289135-xEYFN0uHif4RH03hYXp5W8BedcBFLpon0sIoOKJIz',
            access_token_secret=
            'VRilm9GjJ6s3TmiOsb1YJXe4i5vY52VNMWPSYVGY3MJBC')
        return sum(1 for t in ts.search_tweets_iterable(tso))
    except TwitterSearchException as e:
        print(e)
        return 0
    # (the trailing `return 0` of the original was unreachable — both the
    # try and except branches already return — and has been removed)
def hashtagSearch(hashtag):
    """Run a one-page Twitter search for *hashtag*.

    :param hashtag: the term/hashtag to search for
    :return: the raw response of TwitterSearch.search_tweets, or None on error
    """
    try:
        # BUG FIX: the client was previously constructed *outside* the try
        # block, so a TwitterSearchException raised by the constructor
        # (bad credentials, connection failure) escaped the handler.
        ts = TwitterSearch(
            consumer_key='UIBl6otwQD9CtbhRQSQ2GlV8H',
            consumer_secret='MlxVNNZDWfEDBpOTbZwOAPQ8BziP3tcQwMoU3vXdxllzsdgjLu',
            access_token='85289745-4PknFj4zSUPd12rbIg8ZkPnAAewZCEmwXj3wyNbiO',
            access_token_secret='A0RNhwgoVh0okZQoL5w6UydpplyTSft1Sx6QCZ4TtvaAC')
        tso = TwitterSearchOrder()  # create a TwitterSearchOrder object
        tso.set_keywords([hashtag])  # the single term to look for
        tso.set_include_entities(False)  # no entity information needed
        return ts.search_tweets(tso)
    except TwitterSearchException as e:
        # take care of all those ugly errors if there are some
        print(e)
        return None
def main():
    """Collect today's tweets near LATLONG and write them to a dated log file."""
    tso = TwitterSearchOrder()
    tso.set_keywords([''])
    tso.set_geocode(LATLONG[0], LATLONG[1], 50)
    tso.set_include_entities(False)
    ts = TwitterSearch(
        consumer_key='N80C0jvGBfPnmIyvrdWAVMFad',
        consumer_secret='TIfAaF2Fx9slaQVg19gHX4aNM5BmuhtqvbTxsRvdckcAbSeqgA',
        access_token='374264753-1Tlw3ovzBmPbzmq6ttA83csCNLNOogfOuZUJA1tk',
        access_token_secret='1b53oL9YRj1M2LYswIkwfAFQDBAfyfATzV35j7XZx0u0H'
    )
    today = datetime.date.today()
    # Keep only tweets created today; the search may also return older ones.
    tweets = [tweet for tweet in ts.search_tweets_iterable(tso)
              if dateutil.parser.parse(tweet['created_at']).date() >= today]
    # BUG FIX: json.dumps returns str, which cannot be written to a file
    # opened in binary ('wb') mode under Python 3 — open in text mode.
    with open('log/{:%Y-%m-%d}.log'.format(today), 'w') as log:
        json.dump(tweets, log)
def getUpdateTweets():
    """Fetch tweets containing breaking-news keywords around the search frame's centre.

    :return: the list of matching raw tweet dicts
    """
    frame = SearchFrame()

    order = TwitterSearchOrder()
    # OR-combined news keywords
    order.set_keywords(
        ["breaking", "news", "BREAKING NEWS", "NEWS", "update", "development"],
        or_operator=True)
    # the same half-frame offset is applied to both coordinates
    offset = milesToLat((frame.x / 2) * frame.diameter)
    order.set_geocode(frame.longitude + offset,
                      frame.latitude + offset,
                      int((frame.x / 2) * frame.diameter))

    client = TwitterSearch(
        consumer_key="eoe5TmgDTsmI0E2NpSrt7KPfg",
        consumer_secret="onNO7vobeUw1piKWfmXQJwmpMKrQbzRWTeXFjKo6nZwipCPJ3r",
        access_token="2491643690-UVYiYfoc3VXyQ4dc5B2VcSCMTU0hsl0www8dB7n",
        access_token_secret="g8RmQzuyIA0vgxfCgTcMsSshbWYGmhmKfIO3320TQ6isY")

    return list(client.search_tweets_iterable(order))
def printResearch(keyword):
    """Search recent German tweets for *keyword*, print them, and print a
    sentiment summary line.

    :param keyword: the term to search and score
    """
    tso = TwitterSearchOrder()
    # BUG FIX: the search previously ignored the `keyword` argument and
    # always looked for the hard-coded term 'Bertelsmann'.
    tso.set_keywords([keyword])
    tso.set_language('de')  # German tweets only
    tso.set_include_entities(False)  # no entity information needed
    ts = TwitterSearch(CONS_KEY, CONS_SECRET, ACC_TOKEN, ACC_SECRET)
    for tweet in ts.search_tweets_iterable(tso):
        print('abc')
        print('@%s tweeted: %s' % (tweet['user']['screen_name'], tweet['text']))

    final_score = 23  # placeholder until real scoring is wired in
    #final_score = analyze_tweets(keyword, 10)
    #final_score = get_sentiment_score(keyword)
    if final_score <= -0.25:
        # original marker was mojibake ('\ufffd'); replaced to match the
        # emoji style of the other two branches
        status = 'NEGATIVE | ❌'
    elif final_score <= 0.25:
        status = 'NEUTRAL | 🔶'
    else:
        status = 'POSITIVE | ✅'
    text = 'Average score for ' + keyword + ' is ' + str(final_score) + ' | ' + status
    print(keyword + ': ' + text)
def BasicSearch(self, query):
    """Run a basic Twitter search for *query* using self.api and print each
    result (demo output only).

    :param query: the search string; also stored on self.query
    :return: the list of tweet wrapper objects from BasicTwitterSearch.search
    """
    self.query = query
    search = TwitterSearch.BasicTwitterSearch()
    tweets = search.search(self.api, query)
    #FOR DEMO
    for tweet in tweets:
        # print "[ screen_name ]:" then the tweet text, utf-8 encoded
        print "[", tweet.api_tweet_data.user.screen_name.encode(
            'utf-8'), "]:"
        print "\t", tweet.api_tweet_data.text.encode('utf-8')
    return tweets
def getUserTimeline(username):
    """Return an iterable over *username*'s timeline tweets.

    BUG FIX: the original returned `ts.search_tweets_iterable(tuo)` *after*
    the except clause, so any failure while building the client raised a
    NameError (ts/tuo undefined) instead of being handled.

    :param username: the Twitter screen name whose timeline to fetch
    :return: the lazy tweet iterable, or None if setup failed
    """
    try:
        tuo = TwitterUserOrder(username)  # order object for the user's timeline
        ts = TwitterSearch(
            consumer_key='3iULonI4Xfllv7xPdxaes8eSh',
            consumer_secret=
            'edlvvermsGTOBXOffE9Gi3tlEpCC8T1e9cET1uUvuht6oS2MSM',
            access_token=
            '126733672-EnlC0iMa1WIziFkpJvHAwFc9hNQ0NNC59Ts6afAb',
            access_token_secret=
            'UHZRo6WRT7KwLOTyWeYXqK3vO32AZLQBBhM4mDuPaWfZJ')
        return ts.search_tweets_iterable(tuo)
    except TwitterSearchException as e:
        print(e)
        return None
def get_tweet(keywords, limit=10, lang='en'):
    """Fetch up to *limit* tweets matching *keywords* in language *lang*.

    :param keywords: the search term (passed as a single keyword)
    :param limit: maximum number of tweets to return
    :param lang: two-letter language code for the search
    :return: a list of (keywords, tweet_text) tuples, or None on error
    """
    try:
        search_order = TwitterSearchOrder()
        search_order.set_keywords([keywords])  # keyword to search for
        search_order.set_language(lang)  # language filter
        # search_order.set_include_entities(False)

        # NOTE: credentials are hard-coded (acknowledged as leaked upstream)
        twitter_search = TwitterSearch(
            consumer_key='6TjrSq7MTFFUshRthH52R1uVU',
            consumer_secret='7TpJDGjGvTH7ABZ2KTupWZBl9mg30AykULHHrZBAwy3Oejd2le',
            access_token='2501651690-JfLarQjjtxGcjW0GA62j9c2sfzfl3lpH06SZGtV',
            access_token_secret='lSr4jtSrWMt9atxNMoRkw3lWTEbYDRT6Jy8b1PzJceucq'
        )

        print(search_order)  # debug: show the constructed search order
        tweets = []
        for tweet in twitter_search.search_tweets_iterable(search_order):
            # BUG FIX: the original appended before checking the counter and
            # incremented afterwards, so it returned limit + 1 tweets.
            if len(tweets) >= limit:
                break
            tweets.append((keywords, tweet['text']))
        return tweets
    except TwitterSearchException as e:
        print(e)
        return None
def get_photos(search_terms):
    """Search English tweets for *search_terms* and collect attached photo URLs.

    :param search_terms: list of keywords to look for
    :return: a JSON-encoded list of media URLs, or None on a search error
    """
    try:
        tso = TwitterSearchOrder()
        tso.setKeywords(search_terms)  # words to look for
        tso.setLanguage('en')  # English tweets only
        tso.setCount(40)  # 40 results per page
        tso.setIncludeEntities(True)  # entity info carries the media URLs

        ts = TwitterSearch(consumer_key='CONSUMER_KEY',
                           consumer_secret='CONSUME_SECRET',
                           access_token='ACCESS_TOKEN',
                           access_token_secret='ACCESS_TOKEN_SECRET')

        total = 0
        list_of_media = []
        for tweet in ts.searchTweetsIterable(tso):
            total += 1
            # BUG FIX: the bare `except: pass` hid *all* errors; only a
            # missing/empty media structure is expected here, so catch just
            # the lookup failures.
            try:
                ent = tweet[u'entities']
                if u'media' in ent:
                    media = ent[u'media']
                    photo_url = media[0]['media_url']
                    list_of_media.append(photo_url)
            except (KeyError, IndexError, TypeError):
                pass
        print(total)
        return json.dumps(list_of_media)
    except TwitterSearchException as e:
        # take care of all those ugly errors if there are some
        print(e)
def Crawl_party(string):
    """Crawl the latest tweets of politician *string* and write them to a file.

    One tweet at a random index in [0, 25) is also handed to Random_tweet.

    :param string: the politician's Twitter screen name
    """
    user_info = api.GetUser(screen_name=string)
    user_info = user_info.AsDict()
    print(user_info["screen_name"])
    print("Extracting latest tweets...")
    # BUG FIX: use a context manager so the output file is closed even if an
    # unexpected exception escapes (the original relied on a trailing
    # f.close() that an unhandled error could skip).
    with open('/Users/anubhavjain/Desktop/Politicians_Parties/' + string + '.txt',
              'w+') as f:
        try:
            tuo = TwitterUserOrder(string)  # create a TwitterUserOrder
            ts = TwitterSearch(consumer_key=CONSUMER_KEY,
                               consumer_secret=CONSUMER_SECRET,
                               access_token=ACCESS_KEY,
                               access_token_secret=ACCESS_SECRET)
            # NOTE(review): randint(25) may return 0, in which case no random
            # tweet is ever selected (count starts at 1) — confirm intent.
            ran = np.random.randint(25)
            count = 0
            for tweet in ts.search_tweets_iterable(tuo):
                count += 1
                # strip non-ascii characters before writing
                f.write(
                    str(tweet).encode('ascii', 'ignore').decode('ascii') + "\n")
                if (count == ran):
                    Random_tweet(string, tweet)
            print("Extracted latest", str(count), "tweets")
        except TwitterSearchException as e:  # catch all those ugly errors
            print(e)
def search_twitter(keywords):
    """Collect up to ten original (non-retweet) tweets matching *keywords*.

    Tweets are skipped when they are retweets or when their text merely
    echoes the first keyword.

    :param keywords: list of search terms (first entry used for echo checks)
    :return: a list of [url, text, screen_name] triples, or None on error
    """
    try:
        order = TwitterSearchOrder()
        order.set_keywords(keywords)  # terms to look for
        order.set_language('en')  # English tweets only
        order.set_include_entities(False)  # no entity payload needed

        keys = ks.get_twitter_keys()
        client = TwitterSearch(consumer_key=keys["consumer_key"],
                               consumer_secret=keys["consumer_secret"],
                               access_token=keys["access_token"],
                               access_token_secret=keys["access_secret"])

        collected = []
        first_kw = keywords[0]
        for tweet in client.search_tweets_iterable(order):
            text = tweet['text']
            if (text.startswith('RT')
                    or text.startswith(first_kw)
                    or text.startswith('"' + first_kw)):
                continue
            screen_name = tweet['user']['screen_name']
            url = "http://twitter.com/{0}/status/{1}".format(
                screen_name, tweet['id_str'])
            collected.append([url, text, screen_name])
            if len(collected) == 10:
                break
        return collected
    except TwitterSearchException as e:
        # take care of all those ugly errors if there are some
        print(e)
def getTweetsForKeyword(keyword, last_id=None):
    """
    Get the (recent) tweets for a given keyword
    :param keyword: the query keyword
    :param last_id: only tweets newer than this id are fetched (optional)
    :return: a list of tweets. List is empty if an error occurs
    """
    tweet_list = []
    try:
        print '*** Searching tweets for keyword:', keyword, ' ...'
        tso = TwitterSearchOrder()
        tso.setKeywords([keyword])
        tso.setLanguage('en')
        tso.setResultType('recent')
        tso.setCount(100)  # maximum page size
        tso.setIncludeEntities(True)
        # resume after the last processed tweet, when given
        if last_id is not None:
            tso.setSinceID(last_id)
        ts = TwitterSearch(
            consumer_key=params.CONSUMER_KEY,
            consumer_secret=params.CONSUMER_SECRET,
            access_token=params.ACCESS_TOKEN,
            access_token_secret=params.ACCESS_TOKEN_SECRET
        )
        # NOTE(review): confirm the pinned TwitterSearch version exposes
        # authenticate(); newer releases authenticate implicitly.
        ts.authenticate()
        counter = 0
        for tweet in ts.searchTweetsIterable(tso):
            counter += 1
            tweet_list.append(tweet)
        print '*** Found a total of %i tweets for keyword:' % counter, keyword
        return tweet_list
    except TwitterSearchException, e:  # Python 2 except syntax
        print "[ERROR]", e.message
        # fall back to whatever was collected before the error (may be empty)
        return tweet_list
def twitter_tag_search(ck, cs, at, ats, tag, count, lang, proxy=None):
    """
    function for twitter search on hashtags and keywords
    """
    order = TwitterSearchOrder()
    order.set_keywords(tag)
    # only English and Dutch are valid language filters here
    if lang in ('en', 'nl'):
        order.set_language(lang)
    order.set_result_type('recent')

    # build the client, routing through the proxy only when one is given
    if proxy:
        ts = TwitterSearch(ck, cs, at, ats, proxy=proxy)
    else:
        ts = TwitterSearch(ck, cs, at, ats)

    shown = 0
    for tweet in ts.search_tweets_iterable(order):
        if shown >= count:
            break
        print('@{} - {}\n{}\n'
              .format(tweet['user']['screen_name'],
                      tweet['created_at'],
                      tweet['text']))
        shown += 1
def search(self): try: tso = TwitterSearchOrder() # create a TwitterSearchOrder object tso.set_keywords( self.keywords ) # let's define all words we would like to have a look for tso.set_language('en') # we want to see English tweets only tso.set_include_entities( False) # and don't give us all those entity information # it's about time to create a TwitterSearch object with our secret tokens ts = TwitterSearch(consumer_key=self.consumer_key, consumer_secret=self.consumer_secret, access_token=self.access_token, access_token_secret=self.access_secret) # this is where the fun actually starts :) for tweet in ts.search_tweets_iterable(tso): #print( '@%s tweeted: %s' % ( tweet['user']['screen_name'], tweet['text'] ) ) self.tweettext.append(tweet['text']) except TwitterSearchException as e: # take care of all those ugly errors if there are some print(e) for twttxt in self.tweettext: tokenizer = RegexpTokenizer(r'\w+') #tokens = nltk.word_tokenize(twttxt) tokens = tokenizer.tokenize(twttxt) tags = nltk.pos_tag(tokens) for word, pos in tags: if pos in ['JJ']: # feel free to add any other noun tags self.adjectives.append(word) for word, pos in tags: if pos in ['NN']: # feel free to add any other noun tags self.nouns.append(word) adjHist = Counter(self.adjectives) print "Histogram of Adjectives : " + str(adjHist) nounHist = Counter(self.nouns) print "Histogram of Nouns : " + str(nounHist)
def buscaTweets():
    """Search Portuguese-language tweets about 'lula' and run analisarFrase
    on each tweet's text."""
    try:
        order = TwitterSearchOrder()
        order.set_keywords(['lula'])        # words to search for
        order.set_language('pt')            # Portuguese tweets only
        order.set_include_entities(False)

        # access keys (placeholders)
        client = TwitterSearch(
            consumer_key='xxxxxx',
            consumer_secret='xxxxxx',
            access_token='xxxxxx',
            access_token_secret='xxxxxxxx'
        )

        # walk every result found and analyse its text
        for tweet in client.search_tweets_iterable(order):
            # print('@%s tweeted: %s' % ( tweet['user']['screen_name'], tweet['text']))
            analisarFrase(tweet['text'])
    # handle any error the search may raise
    except TwitterSearchException as e:
        print(e)
def main():
    """Read a username from stdin, pull up to 150 of their latest tweets, and
    print the concatenated, cleaned text."""
    # get our data as an array from read_in()
    lines = read_in()
    try:
        tuo = TwitterUserOrder(lines)  # create a TwitterUserOrder
        ts = TwitterSearch(
            consumer_key='L9khFUeK24k1Vxre6EEGSwl3U',
            consumer_secret=
            'Vhm1lzZF58DstT43PUGKmWcrFDdgTHNni4OidTQ3IoYHultyoO',
            access_token='1080045656947867649-TWUvH5NESuJpiPWYB9LBwyILEBGDXW',
            access_token_secret='3qp0NvV07T4Y2xYGf7ksPE34ikJBBXqyOMovnCKFuBhyQ'
        )
        # FIXES: removed the unused duplicate `tweets = ts.search_tweets_iterable(tuo)`
        # call, and build the text via a list + join instead of quadratic `+=`.
        parts = []
        for tweet in ts.search_tweets_iterable(tuo):
            parts.append(tweet['text'])
            if len(parts) == 150:  # cap at 150 tweets (original comment said 100 but code used 150)
                break
        profilewords = (" " + " ".join(parts)) if parts else ""
        # get rid of the pesky twittr links
        text = re.sub(r'http\S+', '', profilewords)
        # and periods/commas
        text = re.sub(r'[^\w\s]', '', text)
        # and new lines
        text = re.sub("\n|\r", "", text)
        print(text)
    except TwitterSearchException as e:  # catch all those ugly errors
        print(e)
def search():
    """Print German tweets matching the demo keywords ('' and 'Doktorarbeit')."""
    try:
        tso = TwitterSearch.TwitterSearchOrder()  # create a TwitterSearchOrder object
        tso.set_keywords(['', 'Doktorarbeit'])  # words we would like to look for
        tso.set_language('de')  # we want to see German tweets only
        tso.set_include_entities(False)  # no entity information needed

        # BUG FIX: the original called the *module* (`TwitterSearch(...)`)
        # instead of the class `TwitterSearch.TwitterSearch(...)` — the
        # module-qualified usage of TwitterSearchOrder/TwitterSearchException
        # here shows `TwitterSearch` is the module, so calling it raised
        # TypeError.
        ts = TwitterSearch.TwitterSearch(
            consumer_key='aaabbb',
            consumer_secret='cccddd',
            access_token='111222',
            access_token_secret='333444'
        )

        # this is where the fun actually starts :)
        for tweet in ts.search_tweets_iterable(tso):
            print('@%s tweeted: %s' % (tweet['user']['screen_name'], tweet['text']))
    except TwitterSearch.TwitterSearchException as e:
        # take care of all those ugly errors if there are some
        print(e)
def twitterapi_summaries(query, location):
    """Return up to 10 tweet summaries matching *query* near *location*.

    :param query: search phrase (single word or space-separated words)
    :param location: extra keyword appended to the search terms
    :return: (news_summaries, n) — a list of dicts with keys 'user',
             'description', 'publishTime' and the number collected;
             None on a search error
    """
    try:
        # BUG FIX: the original used list(query) when the query had no
        # space, which split the string into individual *characters*;
        # str.split() handles both the one-word and multi-word cases.
        kw = query.split()
        kw.append(location)

        tso = TwitterSearchOrder()  # create a TwitterSearchOrder object
        tso.set_keywords(kw)  # all words we would like to look for
        tso.set_language('en')  # English tweets only
        tso.set_count(10)  # up to 10 results per page
        tso.set_include_entities(False)  # no entity information needed

        ts = TwitterSearch(
            consumer_key='We3N5LWOH63onKrHAH0p8oe3I',
            consumer_secret='k4fA8Jxtv6qxl987IlqlX1iWHZGc2nYauoF3Nlg3A7OXwK1yyd',
            access_token='3905868207-FooZNMEk4V1MIMEb6uPvdudIaVaXxyJJGL5iAFr',
            access_token_secret='nbaKLI2IpRpDg9MFM1UISBLFpSdjQYTSISGTPmhxwlnkt'
        )

        news_summaries = []
        n = 0
        for tweet in ts.search_tweets_iterable(tso):
            if n >= 10:  # stop after ten summaries
                break
            news_summaries.append({'user': tweet['user']['screen_name'],
                                   'description': tweet['text'],
                                   'publishTime': tweet['created_at']})
            n += 1
        return news_summaries, n
    except TwitterSearchException as e:
        # take care of all those ugly errors if there are some
        print(e)
def twitterScrapy(t):
    """Append widely-retweeted fraud/scam/breach tweets to the NewsScrapy sheet.

    :param t: minimum retweet count a tweet must exceed to be recorded
    """
    sh = client.open("NewsScrapy")
    sheet_2 = sh.get_worksheet(1)
    #resize the sheet to append to the first blank row
    sheet_2.resize(1)
    try:
        tso = TwitterSearchOrder()
        # BUG FIX: set_keywords() *replaces* the keyword list, so the three
        # consecutive calls in the original left only 'breach' active.
        # One OR-combined call searches fraud OR scam OR breach.
        tso.set_keywords(['fraud', 'scam', 'breach'], or_operator=True)
        tso.set_include_entities(False)  # no entity information needed

        ts = TwitterSearch(consumer_key='***',
                           consumer_secret='***',
                           access_token='***',
                           access_token_secret='***')

        # this is where the fun actually starts :)
        for tweet in ts.search_tweets_iterable(tso):
            if int(tweet['retweet_count']) > t:
                s = tweet['text']
                # skip retweets (text containing 'RT', case-insensitive)
                rt = re.compile(r'RT', flags=re.I | re.X)
                match = rt.search(s)
                if (match is None):
                    print(tweet['user']['screen_name'], tweet['text'],
                          tweet['retweet_count'])
                    report_sheet = [
                        tweet['user']['screen_name'],
                        tweet['text'],
                        tweet['retweet_count']
                    ]
                    sheet_2.append_row(report_sheet)
    except TwitterSearchException as e:
        # take care of all those ugly errors if there are some
        print(e)
def tweet_search(keywords, tweet_lang):
    """Search tweets for *keywords* in language *tweet_lang*.

    Credentials are read from ./credentials.yml.

    :param keywords: list of words a tweet must contain
    :param tweet_lang: two-letter language code filter
    :return: a dict keyed by tweet id, each value holding 'date' (creation
             time) and 'text' (tweet body); None on a search error
    """
    # load yaml file with secrets to dictionary
    credentials = yaml.safe_load(open("./credentials.yml"))
    try:
        order = TwitterSearch.TwitterSearchOrder()
        order.set_keywords(keywords)        # words to search for
        order.set_language(tweet_lang)      # tweet language filter
        order.set_include_entities(False)   # no entity information

        db_creds = credentials['database']
        searcher = TwitterSearch.TwitterSearch(
            consumer_key=db_creds['consumer_key'],
            consumer_secret=db_creds['consumer_secret'],
            access_token=db_creds['access_token'],
            access_token_secret=db_creds['access_token_secret'])

        # one entry per tweet id: {'date': created_at, 'text': body}
        return {
            tweet["id"]: {"date": tweet["created_at"], "text": tweet["text"]}
            for tweet in searcher.search_tweets_iterable(order)
        }
    except TwitterSearch.TwitterSearchException as e:
        print(e)
def HashTracker():
    """Count negative- and positive-attitude tweets tagged #lol and print the totals."""
    try:
        # one order per attitude filter, both over the same hashtag
        negative_order = TwitterSearchOrder()
        negative_order.set_keywords(["#lol"])
        negative_order.set_negative_attitude_filter()

        positive_order = TwitterSearchOrder()
        positive_order.set_keywords(["#lol"])
        positive_order.set_positive_attitude_filter()

        searcher = TwitterSearch(
            consumer_key='oiqPmaj6hTVywkKIizDzw50l8',
            consumer_secret=
            'jHSBDW8E1doOnKKfabdMEnJFIIU0UCz9ufVwvhNhIDNu0Hessq',
            access_token='42780587-4kuFLgjRn0sq4EyE1hUqdsXhkPRMt0SwvFfHsv3Dr',
            access_token_secret='MuljzOQYyOyaI4A0098vVDZc6xTcBeCfRSi0iUbYDMrDc'
        )

        neg = 0
        for _ in searcher.search_tweets_iterable(negative_order):
            neg = neg + 1
        print(neg)

        pos = 0
        for _ in searcher.search_tweets_iterable(positive_order):
            pos = pos + 1
        print(pos)
    except TwitterSearchException as e:
        print(e)
def searchTwitter(pThisTerm):
    """Search English tweets containing links for *pThisTerm* and append each
    author's profile URL to tweetFile.txt.

    :param pThisTerm: the (URL-encoded) search term
    """
    tweetFile = None
    try:
        # create a TwitterSearchOrder object
        tso = TwitterSearchOrder()
        # Keywords must be URL encoded; filter%3Alinks keeps only tweets
        # that contain links.
        tso.setKeywords(["filter%3Alinks", pThisTerm])
        tso.setLanguage('en')  # English tweets only
        tso.setCount(100)  # maximum number of tweets to return
        tso.setIncludeEntities(True)  # include the entity information
        #tso.setResultType('recent')

        # create a TwitterSearch object with my secret tokens (@CorrenMcCoy)
        ts = TwitterSearch(
            consumer_key='LrA1DdH1QJ5cfS8gGaWp0A',
            consumer_secret='9AX14EQBLjRjJM4ZHt2kNf0I4G77sKsYX1bEXQCW8',
            access_token='1862092890-FrKbhD7ngeJtTZFZwf2SMjOPwgsCToq2A451iWi',
            access_token_secret='AdMQmyfaxollI596G82FBipfSMhagv6hjlNKoLYjeg8'
        )

        # Iterate over the tweet entities which are in a nested dictionary
        tweetFile = codecs.open('C:/Python27/myFiles/tweetFile.txt', 'a', 'utf-8')
        for tweet in ts.searchTweetsIterable(tso):
            if tweet['user']['url'] is not None:
                print(tweet['user']['url'])
                tweetFile.write(tweet['user']['url'] + '\n')
        tweetFile.close()
    except TwitterSearchException as e:
        # BUG FIX: the original closed tweetFile unconditionally here, which
        # raised NameError when the failure happened before the file was
        # opened; only close it when it exists.
        if tweetFile is not None:
            tweetFile.close()
        print(e)
        exit()
def search(keyword):
    """Return the texts of tweets matching the space-separated *keyword* terms.

    :param keyword: search phrase; split on whitespace into AND-combined terms
    :return: a list of tweet texts, or [] if the search fails
    """
    try:
        tso = TwitterSearchOrder()
        # idiom fix: the original wrapped split() in a redundant identity
        # comprehension ([i for i in keyword.split()]).
        tso.set_keywords(keyword.split())
        tso.set_language('en')
        tso.set_include_entities(False)
        ts = TwitterSearch(
            consumer_key='8xwfAotIMlpjBidB6LN2oBaVb',
            consumer_secret=
            'JHZU1hhfsQZON1L6zZM4NlomA9gKWDTwasPF3b7mkzzY0i9WRN',
            access_token='1040904429333929986-rJveV2YAwYJnTyhG9fIY436jCSJm9F',
            access_token_secret='tJMiqZMBOQJFre39kcDM0GW3aEEDhikSWq3q1y8IBYiHe'
        )
        return [tweet['text'] for tweet in ts.search_tweets_iterable(tso)]
    except TwitterSearchException:
        # deliberately swallow the error and return an empty result,
        # matching the original contract
        return []
def getTweets():
    """Collect tweets pairing each flagged word with each configured politician.

    :return: the list of matching raw tweets, or None if the search raised
    """
    politicians = CONF['POLITICIANS']
    sexistWords = ['bitch']
    # one OR-combined search phrase per (word, politician) pair
    wordlist = [word + ' ' + politician
                for word in sexistWords
                for politician in politicians]

    try:
        order = TwitterSearchOrder()
        order.set_keywords(wordlist, or_operator=True)  # all the terms to search for
        order.set_language('en')
        searcher = TwitterSearch(consumer_key=CONF['APP_KEY'],
                                 consumer_secret=CONF['APP_SECRET'],
                                 access_token=CONF['OAUTH_TOKEN'],
                                 access_token_secret=CONF['OAUTH_TOKEN_SECRET'])
        return list(searcher.search_tweets_iterable(order))
    except TwitterSearchException as e:
        # take care of all those ugly errors if there are some
        print(e)
def getTweets(keyword1):
    """Return up to ten formatted '@user tweeted: text' strings for *keyword1*."""
    order = TwitterSearchOrder()
    order.set_keywords([keyword1])
    order.set_count(2)  # page size requested from the API
    order.set_include_entities(False)

    searcher = TwitterSearch(
        consumer_key="BkczwDXiYQWAf2klUbnv2hEO0",
        consumer_secret="JZibmuDUVcQ6utlG9kcoujtbJKHByoC2uM26muO9dpG1K49Hnm",
        access_token="1051442228-Hrwir9aT8K8kFFg8zfiV9VfCW2QAEk47W5xZrRm",
        access_token_secret="9dLGcZTYkLnazEBbghnhIwNIXM1fjZDCzQkn2kn4NUUTY")

    formatted = []
    for happy_tweet in searcher.search_tweets_iterable(order):
        if len(formatted) >= 10:  # cap the result at ten entries
            break
        formatted.append('@%s tweeted: %s'
                         % (happy_tweet['user']['screen_name'],
                            happy_tweet['text']))
    return formatted
class TwitterCaller(object):
    """Thin wrapper owning a TwitterSearch client built from a credentials file."""

    def __init__(self, creds_file):
        # Load OAuth credentials once and build the reusable search client.
        self.creds = TwitterCreds(creds_file)
        self.twitter_search = TwitterSearch(self.creds.consumer_key,
                                            self.creds.consumer_secret,
                                            self.creds.access_token,
                                            self.creds.access_token_secret)

    def search(self, keyword_list):
        """Return a lazy iterable of English tweets matching *keyword_list*."""
        order = TwitterSearchOrder()
        order.set_keywords(keyword_list)
        order.set_language('en')
        order.set_include_entities(False)
        return self.twitter_search.search_tweets_iterable(order)
def jobInteraction(tag):
    """Search Spanish tweets for the comma-separated terms in *tag*.

    :param tag: comma-separated search terms, e.g. "python,django"
    :return: a JSON-encoded list of the raw tweet dicts collected
    """
    tags = tag.split(",")
    tweets = []
    try:
        tso = TwitterSearchOrder()  # create a TwitterSearchOrder object
        tso.set_keywords(
            tags)  # all the words we would like to have a look for
        tso.set_language('es')  # Spanish tweets only
        tso.set_include_entities(
            True)  # include the entity information
        # it's about time to create a TwitterSearch object with our secret tokens
        ts = TwitterSearch(
            # consumer_key='QVhq5HVolTuzE79c16YDegtia',
            # consumer_secret='bfSPCAKXca52eaa2GF4a4mGceYVy4VkENwSuQtnr2c9e34TgWq',
            # access_token='1196870581-DfDo1GQQaukRZQBWn72ugdATSJqKPb4EaMsOFRK',
            # access_token_secret='tRV1lizrsCj8maKxOkzcDvp6vGJLBgDXH0ueEzmXSQTOi'
            consumer_key='gDEFFAToqZ1j5cE9SgJkeqvBY',
            consumer_secret=
            'jqKGAra9Kd0n4jwsQXkhairyxx0uv9D4iMme6AeE2NLDX3fPfz',
            access_token='17160146-FxfSx4Bdq7SvuENSgHvi175f7uyjwoHCHVMUYiJQP',
            access_token_secret='SREyq0DxHOurUY5E0AbT3kPDwl5IFDcPFmnehZjbaH5ab'
        )
        # this is where the fun actually starts :)
        for tweet in ts.search_tweets_iterable(tso):
            # print('@%s tweeted: %s' % (tweet['user']['screen_name'], tweet['text']))
            tweets.append(tweet)
    except TwitterSearchException as e:
        # take care of all those ugly errors if there are some
        print(e)
    print len(tweets)
    return json.dumps(tweets)
def query_twitter(keywords):
    """
    Queries the Twitter Search API with the given keywords and creates a
    pickle file of raw tweets collected for all search terms.

    :param keywords: list of keywords/hashtags to search for
    """
    # BUG FIX: all_tweets must exist before the try block; in the original,
    # a failure while building the order/client made the finally clause raise
    # NameError instead of pickling what had been collected.
    all_tweets = []
    try:
        tso = TwitterSearchOrder()  # create a TwitterSearchOrder object
        tso.set_keywords(keywords)  # the words/hashtags to look for
        tso.set_include_entities(True)  # give us all entity information

        ts = TwitterSearch(consumer_key='c4HS566Njmswz20ZMTjikGUSv',
                           consumer_secret='b7EH8Cpou9CjXR3pbfsA0Pdzli7YNvghQVXwPrSEV7y0bFhaSm',
                           access_token='205479111-sko5QEoOgWDdjeZBiEgZo8hUozjRhBIBCy4hNiM7',
                           access_token_secret='4fqX3U4UhTHoOwodNaNcvJ5mNWzff142BQWlQTPtn7jxa')

        counter = 0  # rate-limit counter
        sleep_at = 44900  # ~45k tweets = 450 calls at 100 tweets per page
        sleep_for = (60 * 15 + 1)  # sleep for 15 mins

        # iterate through all the tweets and pages of results
        for tweet in ts.search_tweets_iterable(tso):
            all_tweets.append(tweet)
            counter += 1  # increase counter
            if counter >= sleep_at:  # it's time to apply the delay
                counter = 0
                sleep(sleep_for)  # sleep for n secs
    except TwitterSearchException as e:
        print(e)
    finally:
        # pickle whatever was collected, even after an error
        print('Number of tweets collected: %d' % len(all_tweets))
        pickle.dump(all_tweets, open('/Users/miljan/PycharmProjects/entity-dependent-sentiment-mining/data/twitter/tube_strike_tweets_' + strftime("%Y-%m-%d_%H:%M:%S", gmtime()) + '.pickle', 'wb'))
def twitter_queue():
    """
    Takes the last used set of twitter credentials and executes the Q until
    it reaches a count of 160.
    """
    credentials = models.TwitterCredentials.objects.order_by('last_used')
    search_counter = 0
    cred_counter = 0
    total_cred_counter = credentials.count()
    creds = credentials[0]
    ts = TwitterSearch(
        consumer_key=creds.consumer_key,
        consumer_secret=creds.consumer_secret,
        access_token=creds.access_token,
        access_token_secret=creds.access_token_secret,
    )
    # BUG FIX: the original tested and reset an undefined name `counter`
    # (NameError at runtime); the counter defined above is `search_counter`.
    # Once this set has been used for 160 queries, switch to the next set.
    if search_counter >= 160 and cred_counter < total_cred_counter:
        cred_counter += 1
        search_counter = 0
        try:
            creds = credentials[cred_counter]
            ts = TwitterSearch(
                consumer_key=creds.consumer_key,
                consumer_secret=creds.consumer_secret,
                access_token=creds.access_token,
                access_token_secret=creds.access_token_secret,
            )
        except IndexError:
            # no more credential sets to rotate through
            exit()