def test_twitter_client():
    """Smoke test: the latest tweet exists and mentions #Bitcoin."""
    tw = TwitterClient()
    tweet = tw.get_latest_tweet()
    assert_not_none(tweet)
    assert_true("#Bitcoin" in tweet)
def test_get_matplotlib_tweets(self):
    """Check if we get the expected results for matplotlib.

    NOTE(review): the original docstring said "bioconductor", which does not
    match the search term actually used below.
    """
    term = "matplotlib"
    client = TwitterClient()
    results = client.get_tweets(term)
    client.write_results(results)
    # Expected count is pinned to a recorded/fixed dataset of 49 tweets.
    assert len(results) == 49
def search():
    """Flask view: run a Twitter search for the posted query and render it."""
    print(f"Method called {request.method} {request.form}")
    query = request.form['query']
    print(f"Query {query}")
    count = 200  # fixed number of tweets to fetch per search
    client = TwitterClient()
    payload = client.main(query, count)
    payload["query"] = query
    return render_template('result.html', title="page", data=payload)
def initTweetStreaming():
    """Start the filtered tweet stream and handle stream-level errors.

    Retries nothing itself; on an HTTP 401 it sleeps 60s and returns, on any
    other TweepError or unexpected exception it re-raises.
    """
    # creating object of TwitterClient Class
    api = TwitterClient()
    myStreamListener = MyStreamListener()
    try:
        myStream = tweepy.Stream(auth=api.auth, listener=myStreamListener)
        myStream.filter(track=charSets.filterSet, stall_warnings=True,
                        is_async=True, languages=["en"])
    # various exception handling blocks
    except KeyboardInterrupt:
        sys.exit()
    except AttributeError:
        print('AttributeError')
    except tweepy.TweepError as e:
        print('Exception')
        print(e)
        # BUG FIX: membership must be tested on the error's string form;
        # `'401' in e` raised TypeError because TweepError is not iterable.
        if '401' in str(e):
            print('401 exception response')
            print(e)
            sleep(60)
        else:
            # raise an exception if another status code was returned,
            # we don't like other kinds
            raise
    except Exception:
        print('Unhandled exception')
        raise
def plot2(self):
    """Plot likes and retweets of the user's timeline as a function of time."""
    analyzer = TweetAnalyzer()
    api = TwitterClient().get_twitter_client_api()
    timeline = api.user_timeline(screen_name=self.getUser_name())
    frame = analyzer.tweets_to_data_frame2(timeline)
    likes_over_time = pd.Series(data=frame['likes'].values, index=frame['date'])
    likes_over_time.plot(figsize=(15, 5), label="likes", legend=True)
    retweets_over_time = pd.Series(data=frame['retweets'].values, index=frame['date'])
    retweets_over_time.plot(figsize=(15, 5), label="retweets", legend=True)
    plt.show()
def input_text():
    """Flask view: build a data-driven definition for the submitted term."""
    if request.method == "POST":
        print(request.form)
        data = request.form['submitText']
        print(data)
        query = data
        # creating object of TwitterClient Class
        api = TwitterClient()
        tweets = api.get_tweets(query=query, count=100)
        langP = LangProcessor()
        pos = langP.get_pos(query, tweets)
        # Append the fetched tweets to the on-disk corpus, then re-tokenize it.
        langP.addtoCorpus("corpus.txt", tweets)
        with open("corpus.txt", "r") as corpus:
            list_of_lists = [line.split() for line in corpus]
        model = langP.build_model(list_of_lists, query)
        # Top three most-similar terms from the model.
        similars = [model[i][0] for i in range(3)]
        definition = langP.generateDefinition(query, pos, similars)
        print(definition)
        # Map the POS tag to a display label (currently unused by the template).
        pos_display = ''
        if 'NN' in pos:
            pos_display = 'noun'
        if 'VB' in pos:
            pos_display = 'verb'
        return render_template("define.html", definition=definition)
class Controller:
    """Finds the hottest U.S. city via the weather API and tweets it."""

    def __init__(self):
        self.twitter_client = TwitterClient().get_client()
        self.weather_requester = WeatherRequester()

    def get_hottest_city_and_tweet_result(self):
        """Post a tweet naming the hottest U.S. city.

        Returns 0 on success, -1 if the weather lookup failed (the error is
        reported via report_error).
        """
        try:
            location_temp_tup = self.weather_requester.find_hottest_city_and_temp()
        except RuntimeError as e:
            # BUG FIX: `e.message` does not exist in Python 3 — use str(e).
            report_error(str(e))
            # BUG FIX: Python 2 `print` statements are syntax errors under
            # Python 3 (the rest of this file uses f-strings, i.e. Python 3).
            print("Sent error info to [email protected]")
            return -1
        tweet = "Today, the hottest city in the U.S is %s with a temperature of %sF" % location_temp_tup
        self.twitter_client.post_text_tweet(tweet)
        print("Succesfully posted tweet for " + str(datetime.today().date()))
        return 0
class Controller:
    """Finds the hottest U.S. city, tweets it, and logs the outcome."""

    def __init__(self):
        self.twitter_client = TwitterClient().get_client()
        self.weather_requester = WeatherRequester()
        self.logger = logging.getLogger('HottestCityBot')

    def get_hottest_city_and_tweet_result(self):
        """Post the hottest-city tweet; 0 on success, -1 on connection failure."""
        try:
            hottest = self.weather_requester.find_hottest_city_and_temp()
            tweet = "Today, the hottest city in the U.S is %s with a temperature of %sF" % hottest
            self.twitter_client.post_text_tweet(tweet)
            self.logger.info("Succesfully posted tweet for " + str(datetime.today().date()))
        except ConnectionError:
            # Log with traceback, notify, and signal failure to the caller.
            self.logger.exception(
                "Run failed: could not establish a connection to an external API."
            )
            report_error()
            return -1
        return 0
def __init__(self, rbot):
    """Wire up the reddit/twitter worker threads (all daemonized, not started)."""
    self.reply_q = queue.PriorityQueue()
    self.job_q = queue.PriorityQueue()
    workers = RedditWorkers(rbot, self.job_q, self.reply_q)
    handler = JobHandlerWorker(VisionOCR, TwitterClient(), self.job_q,
                               self.reply_q)
    self.reply_worker_t = Thread(target=workers.reply_worker, daemon=True)
    self.notif_listener_t = Thread(target=workers.notif_listener, daemon=True)
    self.sub_feed_listener_t = Thread(target=workers.sub_feed_listener,
                                      daemon=True)
    self.score_listener_t = Thread(target=workers.score_listener, daemon=True)
    self.job_handler_t = Thread(target=handler.job_handler, daemon=True)
class TweetAnalyser():
    """Mines the timeline of each configured account, scores tweet sentiment,
    writes a per-account CSV, and summarizes each account's position."""

    def __init__(self):
        self.twitter_client = TwitterClient()
        self.api = self.twitter_client.getTwitterClientAPI()
        self.accounts = self.read_file()
        self.results = []          # "account,verdict" lines accumulated by worker()
        self.accounts_counter = 0  # progress counter

    def read_file(self):
        """Return the account names listed one per line in ACCOUNTS_FILE."""
        with open(ACCOUNTS_FILE, "r") as f:
            # BUG FIX: corrected the "Readind" typo in the progress message.
            print("Reading file : [ {0} ]".format(ACCOUNTS_FILE))
            content = f.read()
            print("File : [ {0} ] is read".format(ACCOUNTS_FILE))
            return content.splitlines()

    def write_in_file(self, input_text_list):
        """Write one result per line to RESULTS_FILE."""
        with open(RESULTS_FILE, "w") as f:
            f.write("\n".join(input_text_list))

    def is_relevant(self, text):
        """Return True if the text mentions any configured key word."""
        text = text.lower()
        # BUG FIX: test membership directly. The old generator yielded the
        # key words themselves, so a falsy key word could never register.
        return any(key_word in text for key_word in KEY_WORDS)

    def get_position(self, text):
        """Score a tweet's sentiment: 1 positive, -1 negative, 0 neutral."""
        text = text.lower()
        if TOPICO == "politica":
            # Strip URLs before feeding the Spanish sentiment classifier.
            text = re.sub(r'^https?:\/\/.*[\r\n]*', '', text, flags=re.MULTILINE)
            clf = SentimentClassifier()
            value = clf.predict(text)
        else:
            analysis = TextBlob(text)
            value = analysis.sentiment.polarity
        if value > .55:
            return 1
        elif value < .35:
            return -1
        else:
            return 0

    def final_position(self, positions):
        """Aggregate per-tweet positions into a verdict string."""
        if len(positions) == 0:
            return "There are not enough data to analyze the account"
        account_position = sum(positions) / len(positions)
        if account_position > .15:
            return "Follower"
        elif account_position < -.15:
            return "Opposition"
        else:
            return "Neutral"

    def tweets_to_data_frame(self, tweets):
        """Build a DataFrame of relevant, non-retweeted tweets with metadata."""
        text = []
        tweet_ids = []  # renamed from `id` to avoid shadowing the builtin
        tweet_len = []
        likes = []
        rt = []
        position = []
        for tweet in tweets:
            if not tweet.retweeted and self.is_relevant(tweet.text):
                text.append(re.sub(r'^https?:\/\/.*[\r\n]*', '', tweet.text,
                                   flags=re.MULTILINE))
                tweet_ids.append(tweet.id)
                tweet_len.append(len(tweet.text))
                likes.append(tweet.favorite_count)
                rt.append(tweet.retweet_count)
                position.append(self.get_position(tweet.text))
        data = {
            "id": tweet_ids,
            "text": text,
            "len tweet": tweet_len,
            "likes": likes,
            "rt": rt,
            "position": position
        }
        return pandas.DataFrame(data)

    def worker(self):
        """Process every account: fetch timeline, save CSV, record verdict."""
        for account in self.accounts:
            print("Computing account = {0}".format(account))
            if account == "":
                break
            # Print progress of computing
            self.accounts_counter += 1
            percent_done = str(int((self.accounts_counter / len(self.accounts)) * 100))
            print("Process completed [ {0}% ]".format(percent_done))
            try:
                tweets = self.api.user_timeline(screen_name=account, count=1000)
                df = self.tweets_to_data_frame(tweets)
                account_result_file = "{0}{1}.csv".format(RESULTS_DIR, account)
                df.to_csv(account_result_file, encoding='utf-8', index=False)
                self.results.append(
                    "{0},{1}".format(account, self.final_position(df["position"]))
                )
            except TweepError:
                # NOTE(review): TweepError can also mean rate limiting or a
                # network failure, not only a private account — message kept
                # for output parity.
                print("The account [ {0} ] is private, skipping...".format(account))

    def main(self):
        """Run the full mining pass and persist the summary file."""
        print("Mining accounts ...")
        self.worker()
        print("Tweets saved in directory")
        print("Saving results in [ {0} ]".format(RESULTS_FILE))
        self.write_in_file(self.results)
        print("Process finished")
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
from components import nav
from components import index
from components import sources
from components.method import method_page
from components.dashboards import create_dashboards
from components.tweetcard import create_deck
import json

# Load the list of cities (with geocodes) to sample tweets from.
with open("locations.json", "r") as location_file:
    locations = json.load(location_file)

api = TwitterClient()

# Collect up to 20 'blm' tweets per configured location.
tweets = []
for location in locations:
    geocode = str(location['geocode'])
    tweets = tweets + api.get_tweets(query='blm', count=20, geocode=geocode,
                                     city=location['city'])
def api(query):
    """Classify tweets matching *query* by sentiment and return a JSON summary.

    Returns a jsonify'd payload with counts, percentages, top tweets per
    sentiment, word counts, and the current worldwide trending topics.
    Emits a 429-style payload when Twitter rate-limits us and a 400-style
    payload when the query matches nothing.
    """
    query_limit = int(os.getenv('QUERY_LIMIT'))
    api = TwitterClient()
    try:
        tweets = api.get_tweets(query, query_limit)
    except TweepError:
        return jsonify({
            "status_code": 429,
            "message": "Too many requests. Try again later"
        })
    if len(tweets) == 0:
        return jsonify({"status_code": 400, "message": "Not a valid query"})

    positive = negative = neutral = 0
    positive_tweets = []
    negative_tweets = []
    neutral_tweets = []
    for tweet in tweets:
        sentiment = get_sentiment(tweet['text'])
        if sentiment == 1:
            tweet['sentiment'] = 'positive'
            positive += 1
            positive_tweets.append(tweet)
        elif sentiment == -1:
            tweet['sentiment'] = 'negative'
            negative += 1
            negative_tweets.append(tweet)
        else:
            tweet['sentiment'] = 'neutral'
            neutral += 1
            neutral_tweets.append(tweet)

    total_per = positive + negative + neutral
    positive_per = round(((positive / total_per) * 100), 2)
    negative_per = round(((negative / total_per) * 100), 2)
    neutral_per = round(((neutral / total_per) * 100), 2)

    # BUG FIX: if every tweet is neutral, mean_total is 0 and the divisions
    # below raised ZeroDivisionError; fall back to 0.0 for both means.
    mean_total = positive + negative
    if mean_total:
        positive_mean = round(((positive / mean_total) * 100), 2)
        negative_mean = round(((negative / mean_total) * 100), 2)
    else:
        positive_mean = negative_mean = 0.0

    # Most-retweeted first within each sentiment bucket.
    positive_tweets = sorted(positive_tweets, key=itemgetter('retweet_count'), reverse=True)
    negative_tweets = sorted(negative_tweets, key=itemgetter('retweet_count'), reverse=True)
    neutral_tweets = sorted(neutral_tweets, key=itemgetter('retweet_count'), reverse=True)

    positive_word_count = get_word_count(positive_tweets, query)
    negative_word_count = get_word_count(negative_tweets, query)
    neutral_word_count = get_word_count(neutral_tweets, query)

    # Overall verdict: "Controversial" when the positive/negative means are
    # within 10 points of each other.
    if abs(positive_mean - negative_mean) < 10.0:
        sentiment = 'Controversial'
    elif positive_mean > negative_mean:
        sentiment = 'Positive'
    else:
        sentiment = 'Negative'

    WOE_ID = 1  # Yahoo "Where On Earth" id 1 = worldwide trends
    trending = api.get_trending(WOE_ID)
    return jsonify({
        'sentiment': sentiment,
        'count': {
            'positive': positive,
            'negative': negative,
            'neutral': neutral,
            'total': total_per
        },
        'mean': {
            'positive': positive_mean,
            'negative': negative_mean
        },
        'results': {
            'positive': positive_per,
            'negative': negative_per,
            'neutral': neutral_per
        },
        'status_code': 200,
        'message': 'Request Successful!',
        'trending': trending[:5],
        'tweets': {
            'positive_tweets': positive_tweets[:5],
            'negative_tweets': negative_tweets[:5],
            'neutral_tweets': neutral_tweets[:5]
        },
        'word_count': {
            'positive': positive_word_count,
            'negative': negative_word_count,
            'neutral': neutral_word_count
        },
        'query': query.title()
    })
def test_twitter_client_images():
    """Smoke test: fetching ten images for a hashtag does not raise."""
    tw = TwitterClient()
    fetched_images = tw.get_images_by_hashtag("#helloworld", 10)
def process_review_data(spark, output_data):
    """Process review data via the Twitter API.

    NOTE(review): `spark` and `output_data` are unused in this visible body —
    presumably a stub to be extended; confirm against the full file.
    """
    # create connection to Twitter
    api = TwitterClient()
def test_twitter_client():
    """Smoke test: fetching ten tweets for a hashtag does not raise."""
    tw = TwitterClient()
    fetched_tweets = tw.get_tweets_by_hashtag("#helloworld", 10)
from TwitterClient import TwitterClient

# SECURITY: these credentials are hard-coded in source; they should be moved
# to environment variables or an untracked config file and rotated.
# Keys for Tweepy TwitterClient
CONSUMER_KEY = 'fKmzykMYS2SfPSmHNyNI5dT9a'
CONSUMER_SECRET = 'Nq2r9E1L8rx4OQ8xIEzvFAfpVnSy4pp7Fva5QJleyOzUqg46jC'
ACCESS_TOKEN = '4608889037-DuDZdgi1wXcJTj5XI2l93SkHx5vnimmIa6fqOEh'
ACCESS_TOKEN_SECRET = 'ylFmbrAu9PuoweHwPx3X1gw158nO3jTD94fuDiL446phw'

TWEET_FILEPATH = "/Users/declanjones/Desktop/TweetProj/Tweet_Data/Tweets/tweet.txt"

# Initialize Tweepy Twitter Client
client = TwitterClient(CONSUMER_KEY, CONSUMER_SECRET, ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
client.scrape()

# Get tweets to tweet from tweet.txt (first line is the next tweet to post).
with open(TWEET_FILEPATH, 'rb') as f:
    allTweets = f.readlines()
nextTweet = allTweets[0] if allTweets else ''

if nextTweet != '':
    client.post(nextTweet)

# Remove tweeted tweet from tweet.txt for next pass.
# BUG FIX: the old loop wrote allTweets[i] (NameError: `i` is undefined) and
# never incremented `index`, so it would loop forever; write the remaining
# lines directly instead.
with open(TWEET_FILEPATH, 'wb') as f:
    f.writelines(allTweets[1:])
def __init__(self):
    """Set up the twitter-client and weather-requester collaborators."""
    self.twitter_client = TwitterClient().get_client()
    self.weather_requester = WeatherRequester()
from TwitterClient import TwitterClient
from Analysis import Analysis

if __name__ == '__main__':
    tw_client = TwitterClient()
    # Alternative data-collection entry points, kept for reference:
    # tw_client.create_stream(['crypto', 'cryptocurrency', 'xrp', 'btc', 'bitcoin', 'ripple'])
    # tw_client.sample_tweets("python", 100)
    # tw_client.backfill_tweets()
    analysis = Analysis()
    print(analysis.find_general_sentiment())
    analysis.generate_user_sentiment()
from TwitterClient import TwitterClient # Keys for Tweepy TwitterClient CONSUMER_KEY = 'fKmzykMYS2SfPSmHNyNI5dT9a' CONSUMER_SECRET = 'Nq2r9E1L8rx4OQ8xIEzvFAfpVnSy4pp7Fva5QJleyOzUqg46jC' ACCESS_TOKEN = '4608889037-DuDZdgi1wXcJTj5XI2l93SkHx5vnimmIa6fqOEh' ACCESS_TOKEN_SECRET = 'ylFmbrAu9PuoweHwPx3X1gw158nO3jTD94fuDiL446phw' TWEET_FILEPATH = "/Users/declanjones/Desktop/TweetProj/Tweet_Data/Tweets/tweet.txt" # Initialize Tweepy Twitter Client client = TwitterClient(CONSUMER_KEY, CONSUMER_SECRET, ACCESS_TOKEN, ACCESS_TOKEN_SECRET) client.scrape() # Get tweets to tweet from tweet.txt f = open(TWEET_FILEPATH, 'rb') allTweets = f.readlines() nextTweet = '' if allTweets: nextTweet = allTweets[0] f.close() if nextTweet != '': client.post(nextTweet) # Remove tweeted tweet from tweet.txt for next pass f = open(TWEET_FILEPATH, 'wb') index = 1 tweetsLeft = len(allTweets) - 1 while index < tweetsLeft:
def __init__(self):
    """Build the Twitter API handle and load the accounts to analyse."""
    self.twitter_client = TwitterClient()
    self.api = self.twitter_client.getTwitterClientAPI()
    self.accounts = self.read_file()      # account names, one per line from file
    self.results = []                     # accumulated per-account result strings
    self.accounts_counter = 0             # progress counter
def run():
    """Construct the bot from its bitcoin and twitter clients and start it."""
    btc = BitcoinClient()
    tw = TwitterClient()
    TwitterBot(btc, tw).run()
def main():
    """Retweet sentiment-scored tweets every 30 seconds, forever."""
    client = TwitterClient()
    while True:
        scored = client.get_tweets_sentiments(client.get_tweets())
        client.retweet(scored)
        time.sleep(30)
from TwitterClient import TwitterClient
from flask import Flask, request, jsonify
from flask_cors import CORS

# Module-level client shared by all requests.
tc = TwitterClient()
app = Flask(__name__)
CORS(app)


@app.route('/twittersentiment', methods=['POST'])
def twittersentiment():
    """POST {'query', 'count'} -> sentiment breakdown of matching tweets."""
    topic = request.get_json()
    tweets = tc.get_tweets(topic['query'], topic['count'])
    if len(tweets) == 0:
        message = {'message': 'No tweets found on that topic'}
        return jsonify(message)
    else:
        # Tweets are assumed pre-tagged with a 'sentiment' key by get_tweets.
        ptweets = [
            tweet for tweet in tweets if tweet['sentiment'] == 'positive'
        ]
        ntweets = [
            tweet for tweet in tweets if tweet['sentiment'] == 'negative'
        ]
        statistics = {
            'positiveTweets': {
                'tweets': ptweets,
                'percentage': len(ptweets) / len(tweets) * 100
            },
            'negativeTweets': {
                'tweets': ntweets,
                # NOTE(review): the remainder of this dict and the return
                # statement are truncated in this view of the file.
class Scraper(object):
    """Streams tweets and StockTwits messages for the trending symbols and
    accumulates per-stock sentiment in bounded priority queues."""

    def __init__(self, trending):
        self.trending = trending
        self.tc = TwitterClient()
        self.start_twitter_stream()
        thread = Thread(target=self.start_stock_twits_stream)
        thread.start()

    def set_trending(self, trending):
        """Soft copy of trending
        @:param trending
        """
        for i in range(len(trending)):
            self.trending[i] = trending[i]

    def start_twitter_stream(self):
        self.tc.get_tweets_stream(self.trending)

    def close_twitter_stream(self):
        # (removed a stray `pass` that preceded this call)
        self.tc.close_stream()

    def start_stock_twits_stream(self):
        """Poll StockTwits while StockInfo.flag is set, scoring each message."""
        while StockInfo.flag:
            messages = stc.get_stock_streams(self.trending)['messages']
            for message in messages:
                # Check what stock the message is talking about
                for stock in self.trending:
                    if stock not in message['body']:
                        continue
                    if stock not in StockInfo.stock_info:
                        StockInfo.stock_info[stock] = PriorityQueue(maxsize=50)
                    # Check if someone is spamming stream
                    if self.isDup(StockInfo.stock_info[stock], message['body']):
                        continue
                    # Bounded queue: evict the oldest entry when full.
                    if StockInfo.stock_info[stock].full():
                        StockInfo.stock_info[stock].get()
                    tweet_sentiment = self.tc.get_tweet_sentiment(message['body'])
                    MyLogger.dwrite(message['body'], str(tweet_sentiment))
                    StockInfo.stock_info[stock].put(
                        (message['created_at'], tweet_sentiment, message['body']))
            # Rate limit is 400 requests per hour = 1/9 requests per second
            t.sleep(10)

    def isDup(self, q: PriorityQueue, text):
        """Return True if *text* already appears as a message body in *q*.

        BUG FIX: queue entries are (created_at, sentiment, body) tuples, so
        the old comparison `data == text` compared a tuple against a string
        and could never match; compare the body element instead.
        """
        for data in list(q.queue):
            if data[2] == text:
                return True
        return False
def main():
    """Interactive loop: search a keyword, analyse sentiment, offer charts."""
    tools = Tools()
    validInput = True
    while True:
        print("|******************************************|")
        print("|Welcome to Sentimental Analysis on Twitter|")
        print("|******************************************|")
        searchTerm = input("Enter Keyword/Tag to search about: ").replace(" ", "")
        # Reject purely numeric or too-short keywords up front.
        if searchTerm.isnumeric():
            print("Please enter a valid keyword")
            continue
        elif len(searchTerm) <= 3:
            print("Please enter a keyword with more than 3 characters")
            continue
        while True:
            noOfTerms = input("Enter how many tweets to search: (< 500): ")
            if not noOfTerms.isnumeric():
                print("Please enter a numeric value < 500.")
                continue
            if int(noOfTerms) > 500:
                print("Defaulting to number of tweets to 500")
                noOfTerms = 500
            twitterClient = TwitterClient()
            rawTweets = twitterClient.get_tweets(searchTerm, int(noOfTerms))
            # clean the tweets before adding to the dictionary
            if len(rawTweets) != 0:
                cleanedTweets = tools.clean_tweet(rawTweets, searchTerm)
                dataAnalysis = DataAnalysis(cleanedTweets)
                tweetSentiment = dataAnalysis.sentimentAnalysis(searchTerm)
                # id -> [raw text, cleaned text, sentiment]
                tweetsDict = {
                    tweetId: [rawTweets[tweetId],
                              cleanedTweets[tweetId],
                              tweetSentiment[tweetId]]
                    for tweetId in cleanedTweets
                }
                tools.write_csv(searchTerm + '.csv', 'w', tweetsDict)
                validInput = True
                break
            else:
                print("No tweets found. Please try searching another trending keyword.\n")
                validInput = False
                break
        if not validInput:
            continue
        while True:
            ch = input("Choose from the below options:\n\t1. Pie Chart\n\t2. Word Cloud\n\t3. Search another "
                       "keyword\n\t4. Exit\nEnter your choice: ")
            if ch == '1':
                print("Data visualisation in Pie Chart")
                print("Loading...")
                dataAnalysis.generatePieChart(searchTerm)
            elif ch == '2':
                print("Data visualisation in Word Cloud\n")
                print("Loading...")
                dataAnalysis.generateWordCloud(searchTerm)
            elif ch == '3':
                break
            elif ch == '4':
                print("Thank You. Good Bye!")
                exit(0)
            else:
                print("Incorrect choice. Please re enter your choice.\n")
                continue
def __init__(self, trending):
    # Shared list of trending symbols (mutated in place elsewhere).
    self.trending = trending
    self.tc = TwitterClient()
    # NOTE(review): called synchronously here — presumably get_tweets_stream
    # returns promptly (non-blocking stream start); confirm against
    # TwitterClient before relying on this constructor completing.
    self.start_twitter_stream()
    # StockTwits polling runs on its own (non-daemon) thread.
    thread = Thread(target=self.start_stock_twits_stream)
    thread.start()
def test_twitter_streaming():
    """Smoke test: opening a hashtag stream does not raise."""
    streaming_client = TwitterClient()
    streaming_client.stream_by_hashtag("#hello")
def __init__(self):
    """Create the twitter client, weather requester, and bot logger."""
    self.twitter_client = TwitterClient().get_client()
    self.weather_requester = WeatherRequester()
    # Named logger so all bot output is grouped under 'HottestCityBot'.
    self.logger = logging.getLogger('HottestCityBot')