def addCrawl(self, box=None, filter=None, db=None):
    """Create and start a new crawler thread.

    :param box: comma-separated string in Twitter bounding-box form;
        tweets inside the box are matched.
    :param filter: list of words used to build a Twitter track filter;
        tweets matching any word are matched.
    :param db: database handle passed through to the worker thread.

    Collects tweets that are in ``box`` OR pass ``filter``.  Does nothing
    when both criteria are None or when no spare OAuth credential is
    available for a new thread.
    """
    # Guard clause: need at least one criterion and a free credential.
    if (box is None and filter is None) or len(self.threads) >= len(self.oauths):
        return
    stop_event = Event()
    self.crawlStop.append(stop_event)
    self.filters.append(filter)
    # Rotate credentials: take the oldest, re-append after use.
    oauth = self.oauths.pop(0)
    statuses = TwitterStream(auth=oauth).statuses
    if box is None:
        crawler = statuses.filter(track=",".join(filter))
    elif filter is None:
        crawler = statuses.filter(locations=box)
    else:
        crawler = statuses.filter(locations=box, track=",".join(filter))
    self.threads.append(
        MyThread(crawler, stop_event, self.tweets, self.mutex, db))
    self.threads[-1].start()
    self.oauths.append(oauth)
def run_python_twitter():
    """Stream tweets tracking 'twitter' and persist them via the ORM.

    For each status carrying a 'text' field, builds a User and a Tweet
    row and adds them to the session; the session is committed in
    batches of 20.  Runs until the stream ends.  (Python 2 code.)
    """
    twitter_stream = TwitterStream(
        auth=OAuth(OAUTH_TOKEN, OAUTH_SECRET, CONSUMER_KEY, CONSUMER_SECRET))
    tweets = twitter_stream.statuses.filter(track=['twitter'])
    counter = 0
    for tweet in tweets:
        print "Got a tweet......."
        # Only messages containing a 'text' field are real tweets
        # (the stream also delivers control messages).
        if tweet.get('text'):
            print "Got tweet..."
            counter += 1
            user = tweet['user']
            u = User(id=user['id'],
                     screen_name=user['screen_name'],
                     followers_count=user['followers_count'])
            t = Tweet(id=tweet['id'],
                      text=tweet['text'],
                      user_id=u.id,
                      favorite_count=tweet['favorite_count'],
                      created_at=parser.parse(tweet['created_at']),
                      retweet_count=tweet['retweet_count'],
                      truncated=tweet['truncated'],
                      # coordinates/place are nested JSON; store serialized
                      coordinates=json.dumps(tweet['coordinates']),
                      place=json.dumps(tweet['place']))
            Session.add(u)
            Session.add(t)
            # Commit every 20 tweets
            if counter >= 20:
                Session.commit()
                counter = 0
def main():
    """Listen on the user stream and answer mentions with a random quote."""
    username = '******'
    quotes = 'quotes.txt'
    # One quote of anguish per non-blank line.
    with open(quotes) as src:
        quotes = [ln.strip() for ln in src if ln != "\n"]
    pprint(quotes)
    creds = OAuth(ACCESS_TOKEN, ACCESS_TOKEN_SECRET, API_KEY, API_SECRET)
    api = Twitter(auth=creds)
    user_stream = TwitterStream(domain='userstream.twitter.com',
                                auth=creds).user()
    for tweet in user_stream:
        # Control messages first, then real tweets.
        if 'event' in tweet:
            print('received event %s' % tweet['event'])
            continue
        if 'hangup' in tweet:
            return
        if 'text' not in tweet:
            continue
        sender = tweet['user']['screen_name']
        if sender == username:
            continue  # never reply to ourselves
        print('from @%s: %s' % (sender, tweet['text']))
        line = random.choice(quotes)
        print('responding with line: %s' % line)
        api.statuses.update(status='@' + sender + ' ' + line,
                            in_reply_to_status_id=tweet['id'])
def print_tweet_stream(tweet_count=100):
    """Print `tweet_count` tweets from the live public sample stream.

    (Python 2 code — uses print statements.)
    """
    # Print to screen tweets from the live stream
    oauth = OAuth(credentials.ACCESS_TOKEN, credentials.ACCESS_SECRET,
                  credentials.CONSUMER_KEY, credentials.CONSUMER_SECRET)
    # Initiate the connection to Twitter Streaming API
    twitter_stream = TwitterStream(auth=oauth)
    # Get a sample of the public data following through Twitter
    iterator = twitter_stream.statuses.sample()
    # Print each tweet in the stream to the screen
    for tweet in iterator:
        tweet_count -= 1
        # Twitter Python Tool wraps the data returned by Twitter
        # as a TwitterDictResponse object.
        # We convert it back to the JSON format to print/score
        print json.dumps(tweet)
        # The command below will do pretty printing for JSON data, try it out
        # print json.dumps(tweet, indent=4)
        if tweet_count <= 0:
            break
def run(self):
    """Stream-thread main loop.

    Connects to the filtered Twitter stream for self.hashtags and pushes
    a (hashtag, user_id, user_name, profile_img_url) tuple onto
    self.queue for every tracked hashtag appearing in a tweet.
    Reconnects after transient TwitterHTTPError; stops on disconnect
    notices, unknown message shapes, hard HTTPError, or when
    self.do_run is cleared.
    """
    logging.info("Starting, hashtag: %s", self.hashtags)
    while (self.do_run):
        try:
            logging.debug("Connecting & authenticating")
            self.twitter_stream = TwitterStream(auth=OAuth(
                token=self.auth_config['token'],
                token_secret=self.auth_config['token_secret'],
                consumer_key=self.auth_config['consumer_key'],
                consumer_secret=self.auth_config['consumer_secret']))
            logging.debug("Connecting filtered stream")
            iterator = self.twitter_stream.statuses.filter(
                track=','.join(self.hashtags))
            for tweet in iterator:
                try:
                    if 'user' in tweet:
                        # Normal status: queue one entry per tracked
                        # hashtag that actually appears in the tweet.
                        user_id = tweet['user']['id']
                        user_name = tweet['user']['name']
                        user_profile_img_url = tweet['user'][
                            'profile_image_url']
                        if tweet['entities']:
                            for hashtag in self.hashtags:
                                if hashtag in [
                                        x['text'] for x in
                                        tweet['entities']['hashtags']
                                ]:
                                    self.queue.put(
                                        (hashtag, user_id, user_name,
                                         user_profile_img_url))
                    elif 'limit' in tweet:
                        # Limit notice: Twitter dropped matching tweets.
                        logging.warn("Rate limited, oustanding tweets %d",
                                     tweet['limit']['track'])
                    elif 'disconnect' in tweet:
                        # Server-initiated disconnect: stop the thread.
                        logging.critical(
                            "Stream disconnected code %d stream %s reason %s",
                            tweet['disconnect']['code'],
                            tweet['disconnect']['stream_name'],
                            tweet['disconnect']['reason'])
                        self.do_run = False
                except KeyError:
                    # Unexpected message shape: log it and stop.
                    logging.error("Unknown: %s", tweet)
                    self.do_run = False
                if not self.do_run:
                    break
        except TwitterHTTPError as ex:
            # Transient API error: back off 30 s, then reconnect.
            logging.error(ex)
            time.sleep(30)
            continue
        except HTTPError as ex:
            # Hard HTTP failure: give up entirely.
            logging.error(ex)
            break
    logging.info("Finishing")
def stream_tweets(self,
                  total_tweets,
                  output_path='tweets-from-stream.json',
                  filter=None):
    """Stream tweets matching `filter` and append each one as a JSON
    object (one per line) to `output_path`.

    :param total_tweets: stop after this many tweets have been saved.
    :param output_path: file the tweets are appended to.
    :param filter: track terms passed to the Twitter filter endpoint.
    """
    # Avoid the mutable-default-argument trap; [] was the old default.
    if filter is None:
        filter = []
    # authentication
    auth = OAuth(cred['token'], cred['token_secret'],
                 cred['consumer_key'], cred['consumer_secret'])
    # stream twitter object
    stream = TwitterStream(auth=auth)
    tweet_count = 0
    initial_time = int(round(time.time() * 1000))
    # Open the output file once instead of re-opening it per tweet.
    with open(output_path, 'a') as jsonfile:
        for tweet in stream.statuses.filter(track=filter):
            json.dump(tweet, jsonfile)
            jsonfile.write("\n")  # json.dump does not add a newline
            tweet_count = tweet_count + 1
            print(tweet_count)
            if tweet_count >= total_tweets:
                break
    final_time = int(round(time.time() * 1000))
    print('\ntotal tweets streamed: %s' % tweet_count)
    print('tweets saved in file: %s' % output_path)
    print('total run time: %s ms' % (final_time - initial_time))
def get_dataset(location="europe", num_of_tweets=500):
    """Collect up to `num_of_tweets` English, non-retweet tweet texts
    from the stream filtered to the given LOCATIONS key.

    (Python 2 code — uses a print statement.)
    """
    location = LOCATIONS[location]
    # Credentials are taken from the environment.
    CONSUMER_KEY = os.getenv("TWITTER_CUSTOMER_KEY")
    CONSUMER_SECRET = os.getenv("TWITTER_CUSTOMER_SECRET")
    ACCESS_TOKEN = os.getenv("TWITTER_ACCESS_KEY")
    ACCESS_SECRET = os.getenv("TWITTER_ACCESS_SECRET")
    oauth = OAuth(ACCESS_TOKEN, ACCESS_SECRET, CONSUMER_KEY, CONSUMER_SECRET)
    twitter = TwitterStream(auth=oauth)
    iterator = twitter.statuses.filter(locations=location, language="en")
    iterator_list = [iterator]
    tweets_dataset = []
    try:
        for i in iterator_list:
            tweet_count = num_of_tweets
            for tweet in i:
                # Skip retweets; count only original tweets.
                if "RT @" not in tweet["text"]:
                    tweets_dataset.append(tweet["text"])
                    tweet_count -= 1
                    print tweet["text"]
                if tweet_count == 0:
                    break
    except Exception:
        # Best effort: return whatever was collected before the error.
        pass
    return tweets_dataset
def initialize(credentials):
    """Build the Twitter stream client and the Kinesis Firehose client.

    :param credentials: dict with a 'twitter' section (OAuth kwargs) and
        an 'aws' section (boto3 client kwargs).
    :return: (twitter_stream, kinesis) tuple.
    """
    twitter_stream = TwitterStream(auth=OAuth(**credentials['twitter']))
    firehose = boto3.client('firehose',
                            region_name='us-east-1',
                            **credentials['aws'])
    return twitter_stream, firehose
def get_recent_tweets(intent):
    """Alexa intent handler: sample up to `count` (capped at 20) tweets
    from the public stream and speak author name and text via SSML.

    Falls back to the welcome response on any failure.
    """
    try:
        count = int(intent["slots"]["count"]["value"])
        if count > 20:
            count = 20
        # Initiate the connection to Twitter Streaming API
        twitter_stream = TwitterStream(auth=oauth)
        # Get a sample of the public data following through Twitter
        iterator = twitter_stream.statuses.sample()
        msg = "<speak> recent " + str(count) + " tweets are <break time='1s'/>"
        for tweet in iterator:
            count -= 1
            try:
                if 'text' in tweet:
                    # only messages contains 'text' field is a tweet
                    msg += "user name is " + tweet['user']['name']
                    msg += "tweet is " + tweet['text'] + " <break time='1s'/> "
            except:
                # Malformed tweet: skip it and keep streaming.
                continue
            if count <= 0:
                break
        msg += "</speak>"
        return build_response({}, build_speechlet_response("", msg, "", True))
    except:
        # Deliberate catch-all: any failure (bad slot value, stream
        # error) degrades to the welcome response below.
        pass
    return get_welcome_response()
def get_twitter_stream():
    """Return an iterator over the authenticated account's user stream."""
    credentials = OAuth(**TWITTER)
    stream = TwitterStream(domain="userstream.twitter.com",
                           api_version="1.1",
                           auth=credentials)
    return stream.user()
def harvest_tweets(cls, num_of_tweets, lang, geo):
    """
    Get tokenized text of the tweets and hashtags in specific language and location
    :param num_of_tweets: number of needed tweets
    :param lang: languages of the needed tweets
    :param geo: location of needed tweets
    Writes tweet-messages into the file PROJECT_LOCATION/output_files/tweets.txt
    Writes hashtags from collected tweets into the file PROJECT_LOCATION/output_files/hashtags.txt
    """
    params = cls._get_settings('../settings.txt')
    auth = OAuth(params["access_token_key"], params["access_token_secret"],
                 params["api_key"], params["api_secret"])
    stream = TwitterStream(auth=auth)
    tweet_iter = stream.statuses.filter(locations=geo, languages=lang)
    # Hoist loop-invariant objects: one tokenizer and one compiled URL
    # regex instead of rebuilding them for every tweet / every word.
    tokenizer = TweetTokenizer()
    url_re = re.compile(url_pattern)
    with open("../output_files/tweets.txt", "w") as tweets, \
            open("../output_files/hashtags.txt", "w") as hashtags:
        count = 0
        for tweet in tqdm(tweet_iter):
            if count > num_of_tweets:
                break
            if 'text' not in tweet:
                print('No text in tweet ', tweet)
                continue
            for word in tokenizer.tokenize(tweet["text"].lower()):
                # Drop URLs; keep everything else as whitespace-joined tokens.
                if not url_re.match(word):
                    tweets.write(word + " ")
            for ht in tweet["entities"]["hashtags"]:
                hashtags.write(ht["text"].lower() + " ")
            count += 1
def status(self):
    """Fetch one tweet from the public sample stream, dump it as JSON to
    tweets.txt, and return a short confirmation string."""
    # NOTE(review): hardcoded credentials — should be moved to
    # configuration or environment variables.
    ACCESS_TOKEN = '704014440409108480-lCPbJXuHznMhKqrs5eawZ4UIUNMUVzE'
    ACCESS_SECRET = '4R9B8kGQVwkedwCp5JnL9AT5jGWQI3Revx1h7eYPt6GRf'
    CONSUMER_KEY = 'Gpd8ogDa5nDpxPTwk1uBoMxPy'
    CONSUMER_SECRET = 'H7UI3cEBa9ES5oifTrSEbuk0jnNmlGkSOX4SotJeERKJzLonjI'
    oauth = OAuth(ACCESS_TOKEN, ACCESS_SECRET, CONSUMER_KEY, CONSUMER_SECRET)
    # Initiate the connection to Twitter Streaming API
    twitter_stream = TwitterStream(auth=oauth)
    # Get a sample of the public data following through Twitter
    iterator = twitter_stream.statuses.sample()
    tweet_count = 1
    # BUG FIX: the file used to be closed inside the loop body, so any
    # second iteration would have written to a closed file.  Use a
    # context manager and let it close after the loop.
    with open("tweets.txt", 'w') as t_file:
        for tweet in iterator:
            t_file.write(json.dumps(tweet))
            tweet_count -= 1
            if (tweet_count <= 0):
                break
    return 'Fetched from Twitter'
def handle_twitter():
    """Return a DataFrame of live tweets mentioning @HillaryClinton,
    with a per-tweet topic/sentiment probability from getprobtop().

    NOTE(review): the stream iterator has no break condition — this
    loops until the stream itself ends; confirm that is intended.
    """
    # Return data from Twitter
    # NOTE(review): hardcoded credentials — move to config/env.
    ACCESS_TOKEN = '759067855123996673-SMh5suAmoGjFjLe9uGnT8kDjBAdygkJ'
    ACCESS_SECRET = 'mXd44Jg5QOkhKmO310ex4Zwabe6wEeApZnC2YEuKdHZVz'
    CONSUMER_KEY = 'pUIwbWWj9nqjQNRU4mioXHnCJ'
    CONSUMER_SECRET = 'ukObCLCVITbL1biri3jheZHsoVeq5iLVplKcsUa1EeczKB8d2G'
    # Read train data with sentiment positive and negative
    oauth = OAuth(ACCESS_TOKEN, ACCESS_SECRET, CONSUMER_KEY, CONSUMER_SECRET)
    twitter_stream = TwitterStream(auth=oauth)
    twitter = Twitter(auth=oauth)
    x = []
    # Single-pass loop kept as written; collects one filter iterator.
    for i in range(0, 1):
        # Read tweets with Hillary Clinton statuses
        iterator = twitter_stream.statuses.filter(track="@HillaryClinton,",
                                                  language="en")
        x.append(iterator)
        #iterator = twitter.search.tweets(q='HillaryClinton', lang='en', count=10000)
    collectobj = []
    # Create a Data Frame from Json object with columns as id, User_name,
    # Tweet_text, Location, probability
    for iterator in x:
        for tweet in iterator:
            # Only keep tweets whose author has a non-empty location.
            if 'user' in tweet.keys() and tweet['user']['location']:
                # Get probability of a tweet being positive, negative or neutral
                topic = getprobtop(tweet['text'])
                obj = {'id': tweet['user']['id'],
                       'User_name': tweet['user']['screen_name'],
                       'Text': tweet['text'],
                       'location': tweet['user']['location'],
                       'status': topic}
                print("Returning from prob")
                collectobj.append(obj)
    table = json_normalize(collectobj)
    return (table)
def run_stream(keyword='eurusd', filter_by_followers=None):
    """Stream English tweets matching `keyword` and append them to a
    daily CSV under tweet_data/.

    :param keyword: track term passed to the Twitter filter endpoint.
    :param filter_by_followers: when set, only save tweets from users
        with at least this many followers.

    Stops listening for the day once the clock reaches 23:58.
    """
    try:
        read_api_keys()
        oauth = OAuth(ACCESS_TOKEN, ACCESS_SECRET, CONSUMER_KEY,
                      CONSUMER_SECRET)
        logging.info("Loaded API keys")
    except Exception:
        logging.info("Unable to load API keys (missing or out of date)")
        return

    # Initiate the connection to Twitter Streaming API
    twitter_stream = TwitterStream(auth=oauth)

    # Retry with exponential backoff (capped at 16 minutes) until the
    # filtered stream is established.
    iterator_succeeded = False
    fails = 0
    while not iterator_succeeded:
        try:
            iterator = twitter_stream.statuses.filter(track=keyword,
                                                      language="en")
            iterator_succeeded = True
            logging.info("Now listening for tweets")
        except TwitterHTTPError:
            wait_time = 60 * (2 ** fails) if fails <= 5 else 60 * 16
            fails += 1
            logging.info(
                "HTTP Error. sleeping for {} seconds then attempt to reconnect"
                .format(wait_time))
            time.sleep(wait_time)

    filename = r'tweet_data/' + time.strftime(
        "%d-%m-%y") + '_raw_tweets' + '.csv'
    if not os.path.exists('tweet_data'):
        os.makedirs('tweet_data')
    if not os.path.isfile(filename):
        # New day: write the CSV header once.
        with open(filename, 'a') as f:
            writer = csv.writer(f)
            writer.writerow(
                ['date_time', 'user_id', 'num_followers', 'tweet_text'])

    logger.info("Writing tweets to: {}".format(filename))
    for tweet in iterator:
        print('pre filter')
        print(tweet['text'])
        # BUG FIX: this used to be a `while` loop, which re-wrote the
        # SAME tweet's fields endlessly until 23:58.  Instead, stop
        # streaming for the day once 23:58 is reached, and write each
        # tweet exactly once.
        if int(time.strftime("%H")) == 23 and int(time.strftime("%M")) >= 58:
            break
        fields = [
            tweet['created_at'], tweet['user']['id'],
            tweet['user']['followers_count'], tweet['text']
        ]
        print(tweet['text'])
        if filter_by_followers and filter_by_followers > fields[2]:
            continue  # author below the follower threshold: skip
        with open(filename, 'a') as f:
            csv.writer(f).writerow(fields)
    logger.info("Stopped listening to the day's tweets")
def twitter_streaming(keyword, number_of_tweets):
    """Stream `number_of_tweets` English tweets matching `keyword`,
    printing each as JSON and appending it to
    Stream_<number_of_tweets>_<keyword>_tweets.json.

    :param keyword: track term passed to the Twitter filter endpoint.
    :param number_of_tweets: number of tweets to collect before stopping.
    """
    oauth = OAuth(access_token, access_secret, consumer_key, consumer_secret)
    # Initiate the connection to Twitter Streaming API
    twitter_stream = TwitterStream(auth=oauth)
    # Get tweets by keyword
    iterator = twitter_stream.statuses.filter(track=keyword, language="en")
    tweet_count = number_of_tweets
    # Context manager ensures the file is closed even if the stream
    # raises mid-iteration (the original leaked the handle on error).
    with open('Stream_%d_%s_tweets.json' % (number_of_tweets, keyword),
              'a') as f_out:
        for tweet in iterator:
            tweet_count -= 1
            # The stream yields TwitterDictResponse objects; serialize
            # back to JSON to print and persist.
            print(json.dumps(tweet))
            f_out.write(json.dumps(tweet))
            f_out.write('\n')
            # The command below will do pretty printing for JSON data:
            # print json.dumps(tweet, indent=4)
            if tweet_count <= 0:
                break
def connect(self, heartbeat_timeout=60 * 5):
    '''
    Initiate the connection to Twitter Streaming API.
    Heartbeat timeout is 90 seconds by default. If no new tweet is
    received by the stream, connection shall be terminated. Increase it
    to 5 minutes.

    :param heartbeat_timeout: seconds without data before the stream
        connection is dropped (default 300).
    '''
    # self.oauth must already be configured before calling connect().
    self.stream = TwitterStream(auth=self.oauth,
                                heartbeat_timeout=heartbeat_timeout)
def __init__(self):
    """Set up Twitter REST and streaming clients.

    Does nothing when the twitter package failed to import
    (`twitter_imported` is falsy), leaving the instance unconfigured.
    """
    if (twitter_imported):
        self.oAuth = OAuth(ACCESS_TOKEN, ACCESS_SECRET, CONSUMER_KEY,
                           CONSUMER_SECRET)
        # Init connection to twitter streaming api
        self.twitter_stream = TwitterStream(auth=self.oAuth)
        # REST client for non-streaming calls.
        self.twitter = Twitter(auth=self.oAuth)
        # Buffer for collected tweets.
        self.tweets = []
def stream(self):
    """Listen to the authenticated user's stream and refresh the feed
    view every time a new event arrives."""
    user_stream = TwitterStream(auth=authenicate(),
                                domain='userstream.twitter.com')
    for event in user_stream.user():
        self.feed.values = self.update_feed(event)
def auth_func():
    """Load Twitter credentials from ./api_cred.yml and build clients.

    :return: (firehose_client, twitter_stream) tuple.
    """
    # Retrieve cred for twitter api.  safe_load avoids the arbitrary
    # object construction yaml.load permits, and the context manager
    # closes the handle the original left open.
    with open(os.path.expanduser('./api_cred.yml')) as cred_file:
        credentials = yaml.safe_load(cred_file)
    twitter_stream = TwitterStream(auth=OAuth(**credentials['twitter']))
    # connecting to firehose
    c = boto3.client('firehose', region_name='us-east-1')
    return c, twitter_stream
def get_and_process_tweets(oauth, batch_count=10):
    """Print tweets from the public sample stream until it ends.

    NOTE(review): `batch_count` is accepted but never used — presumably
    intended to bound the loop; confirm with the caller.
    """
    print("Starting Twitter Batch Processing")
    # Get twitter stream
    sample_stream = TwitterStream(auth=oauth).statuses.sample()
    for status in sample_stream:
        print(status)
def start_stream(self):
    """Create the connection to the Twitter streaming endpoint and
    persist every matching tweet to the database."""
    endpoint = TwitterStream(auth=self.twitter_api)
    # Bounding boxes cover Seattle and New York.
    boxes = '-122.436232,47.495315,-122.2249728,47.734319,-74.255641,40.495865,-73.699793,40.91533'
    tweet_iterator = endpoint.statuses.filter(track=self.q,
                                              language='en',
                                              locations=boxes)
    for status in tweet_iterator:
        print(status['text'], end='\n\n')
        self.write_db(status)
def connect_twitter(self, config_filepath='/xxxx/xxxx/.xxxx/xxxx.yml'):
    """Read Twitter credentials from a YAML config and return an
    authenticated TwitterStream.

    :param config_filepath: YAML file with a 'twitter' section holding
        token, token_secret, consumer_key and consumer_secret.
    :return: an authenticated TwitterStream instance.
    """
    # safe_load: plain yaml.load can construct arbitrary Python objects;
    # the context manager also closes the handle the original leaked.
    with open(os.path.expanduser(config_filepath)) as cfg:
        credentials = yaml.safe_load(cfg)
    credentials = credentials['twitter']
    token = credentials.get('token')
    token_secret = credentials.get('token_secret')
    consumer_key = credentials.get('consumer_key')
    consumer_secret = credentials.get('consumer_secret')
    t = TwitterStream(
        auth=OAuth(token, token_secret, consumer_key, consumer_secret))
    return t
def open_stream(self):
    """
    Opens an interface to the Twitter API and opens a stream.
    """
    # REST client first, then the user-stream client.
    self.twitter = Twitter(auth=self.auth)
    stream_client = TwitterStream(domain='userstream.twitter.com',
                                  auth=self.auth)
    self.stream = stream_client.user()
    self.iterator = iter(self.stream)
def getSummary(self, inp):
    """Collect up to 1000 tweets tracking `inp`, write their cleaned
    text to Output.txt, and return a 5-sentence summary of the corpus.

    :param inp: topic/track string passed to the Twitter filter.
    :return: the summary string, prefixed with a newline.
    """
    # NOTE(review): hardcoded credentials — move to config/env.
    ACCESS_TOKEN = '949253929-o6YjLSBSF6wLSqRgazQ0Dgg3g1YgaSowvVdQC0iK'
    ACCESS_SECRET = 'cA3gerIJIssnMR83vqXSAHPpI5wloJ2DhuDlHXy97qMCO'
    CONSUMER_KEY = 'SPpUOTLLnqlzR8roaO7adrGMi'
    CONSUMER_SECRET = 'ZRaA1vB9FMFG6QXKZR2wDMrrjD2oZi4bTTG9TSDaqL6akzdoha'
    oauth = OAuth(ACCESS_TOKEN, ACCESS_SECRET, CONSUMER_KEY, CONSUMER_SECRET)
    twitter_stream = TwitterStream(auth=oauth)
    tweet_count = 1000
    # (Removed: an unused sample() iterator that was immediately
    # overwritten by this filter() call.)
    iterator = twitter_stream.statuses.filter(track=inp,
                                              language='en',
                                              tweet_mode='compatibility')
    with open('Output.txt', 'w') as text_file:
        for tweet in iterator:
            tweet_count -= 1
            if 'text' in tweet:
                print(tweet_count)
                # Strip the retweet marker, then drop mentions, links
                # and entity fragments word-by-word.
                words = re.sub('RT', ' ', tweet['text']).split()
                # BUG FIX: the original removed items from the list it
                # was iterating over, silently skipping the word after
                # each removal.  Build a filtered list instead.
                kept = [
                    w for w in words
                    if not w.startswith(("@", "http://", "https://", "&"))
                ]
                cleanString = ' '.join(kept)
                print(cleanString)
                try:
                    text_file.write(cleanString)
                    text_file.write(".")
                except UnicodeEncodeError:
                    # Don't count tweets we failed to persist
                    # (the original `tweet_count - +1` was a no-op).
                    tweet_count += 1
            if tweet_count <= 0:
                break
    print("\n")
    print("SUMMARY")
    ss = summarize.SimpleSummarizer()
    with open('Output.txt', 'r') as source:
        data = source.read()
    summ = ss.summarize(data, 5, inp)
    summary = "\n".__add__(summ)
    print(summary)
    return summary
def auto_reply():
    """Watch mentions of @milkboy_core_ai containing a theme request and
    auto-post a generated manzai comedy routine as a tweet thread, then
    reply to the requester with a link to the thread.

    Runs forever over the filtered stream; any per-tweet failure is
    swallowed and the next tweet is processed.
    """
    twitter_stream = TwitterStream(auth=get_auth())
    theme = pred1 = pred2 = ''
    first_stage = {}
    neta_list = []
    stage_num = 3
    print('activate auto reply')
    for tweet in twitter_stream.statuses.filter(
            language='ja', track='@milkboy_core_ai テーマ'):
        start_t = time.time()
        stage_max = 3
        print(tweet)
        try:
            # The theme is the last whitespace-separated token; reject
            # mentions-as-theme and overly long themes.
            theme = tweet['text'].split()[-1]
            if '@' in theme or len(theme) > 30:
                continue
            tle = False
            # Retry generation with random seeds until both predictions
            # are non-empty, or give up after 30 s (time limit exceeded).
            while True:
                try:
                    seed = random.randint(0, 100000)
                    neta_list = generate_neta_list(theme, seed, stage_max)
                    stage_num = len(neta_list)
                    if time.time() - start_t > 30:
                        tle = True
                        break
                except:
                    # Generation failed: try another seed.
                    continue
                first_stage = neta_list[0] if stage_num > 1 else neta_list[-1]
                pred1, pred2 = first_stage['pred1'], first_stage['pred2']
                print(pred1)
                if pred1 != '' and pred2 != '':
                    break
            if tle:
                continue
            # Opening gag ("tsukami"): first two tweets of the thread.
            text1, text2 = tsukami_script(theme, first_stage['tsukami'])
            first_tweet = update_status(text1)
            data = update_status(text2, first_tweet['id'])
            # Introduction section of the routine.
            texts = introduction(first_stage['category'], pred1, pred2)
            data = multiple_tweets(texts, data)
            for i in range(stage_num):
                neta = neta_list[i] if i < stage_num - 1 else neta_list[-1]
                feat_text = [
                    f"駒場「{neta['featX']}」\n\n",
                    f"内海「{neta['featX_reply']}」\n\n",
                    f"駒場「{neta['anti_featX']}」\n\n",
                    f"内海「{neta['anti_featX_reply']}」\n\n"
                ]
                if i != stage_num - 2:
                    feat_text.append(f"駒場「{neta['conjunction']}」\n\n")
                if i == stage_num - 1:
                    # Standard manzai closing lines on the final stage.
                    feat_text.append("内海「いや、絶対ちゃうやろ。」\n\n")
                    feat_text.append("内海「もうええわ、どうもありがとうございました。」\n\n")
                data = multiple_tweets(feat_text, data)
            # Reply to the requester with a link to the posted thread.
            reply_text = f"@{tweet['user']['screen_name']}\nネタを投稿しました!\n"
            reply_text += f"https://twitter.com/milkboy_core_ai/status/{first_tweet['id']}"
            update_status(reply_text, tweet['id_str'])
        except:
            # Best effort: skip this request and wait for the next one.
            continue
def main():
    """Stream tweets about the Irish 8th-amendment referendum, save each
    full-text tweet to its own timestamped JSON file, and append
    (id, text, location, sentiment score) to data/all_tweets.json."""
    # Variables that contains the user credentials to access Twitter API
    oauth = OAuth(ACCESS_TOKEN, ACCESS_SECRET, CONSUMER_KEY, CONSUMER_SECRET)
    # Initiate the connection to Twitter Streaming API
    twitter_stream = TwitterStream(auth=oauth)
    # scraping tweets the contain one ore more of the words that we are tracking
    iterator = twitter_stream.statuses.filter(
        track=
        "eighthamendment, 8thAmendment, repealthe8th, repealtheeighth, 8thRef, 1thing4choice, realityofrepeal, time4choice, savethe8th, savetheeighth, save8Rally, rally4life, strike4repeal, riseandrepeal, right2life",
        language="en")
    for tweet in iterator:
        JSONdata = json.dumps(tweet)
        if "full_text" in JSONdata:
            # Only looks at tweets that have full text in it
            text = get_full_text(
                tweet)  # extracts the full text in the json file
            if text is not None:  # As long as the full_text was found
                json_id = make_id()  # create a unique timestamp id
                with open(
                        json_id, "w+"
                ) as f:  # give it its own file for the whole json data
                    json.dump(tweet, f)
                data = json.load(open(json_id))  # load the json data
                location = find_tweet_location(data)  # get the tweet location
                location = str(location)  # turn it into a string
                # writes tweet id num, actual tweet text, user location and
                # sentiment analysis score to a master file
                with open("data/all_tweets.json", "a+") as fi:
                    fi.write(json_id + " ")
                    json.dump(text, fi)
                    sentenceAlph = re.sub(
                        "[^a-zA-Z ]", "",
                        text)  # removes anything that is not a letter
                    score = str(
                        round(
                            SentiWordsSentence.sentTweetWords_final_score(
                                sentenceAlph), 2)
                    )  # get the sentiment analysis score and round it to 2 decimal places
                    fi.write(" " + location + " " + score + "\n")
                time.sleep(sleep_time)  # reduces chances of rate limiting
def stream_tweets():
    """Print five tweets from the public sample stream as JSON."""
    auth = OAuth(access_token, access_secret, consumer_key, consumer_secret)
    sample = TwitterStream(auth=auth).statuses.sample()
    remaining = 5
    for tweet in sample:
        print(json.dumps(tweet))
        remaining -= 1
        if remaining <= 0:
            break
def create_tweet(new_status):
    """Post `new_status` to the authenticated account's timeline.

    :param new_status: text of the tweet to publish.
    :return: the Twitter API response for the created status.
    """
    oauth = OAuth(cred.user_info['ACCESS_TOKEN'],
                  cred.user_info['ACCESS_SECRET'],
                  cred.user_info['CONSUMER_KEY'],
                  cred.user_info['CONSUMER_SECRET'])
    # The streaming client previously built here was never used; posting
    # a status only needs the REST client.
    twitter = Twitter(auth=oauth)
    return twitter.statuses.update(status=new_status)
def listen(self):
    """Follow the configured transit accounts on the streaming API and
    push each formatted/translated tweet out via FCM and a spreadsheet
    export.  Runs until the stream ends."""
    wordFormat = WordFormat()
    fcm = FCM()
    exportSheet = ExportSheet()
    oauth = OAuth(self.ACCESS_TOKEN, self.ACCESS_SECRET, self.CONSUMER_KEY,
                  self.CONSUMER_SECRET)
    print("Running twitter stream...")
    try:
        # Initiate the connection to Twitter Streaming API
        twitter_stream = TwitterStream(auth=oauth)
        # Followed accounts (location filtering is not used):
        # My ID: 80337313
        # SMRT_Singapore: 307781209
        # SBSTransit_Ltd: 3087502272
        stream = twitter_stream.statuses.filter(
            follow="307781209, 80337313, 3087502272", language="en")
    except Exception as e:
        print("Connecting to twitter error: {}".format(e))
        # BUG FIX: without this return, the loop below raised NameError
        # because `stream` was never bound after a failed connection.
        return
    print("Waiting for twitter msg...")
    for tweet in stream:
        try:
            tweetJson = json.dumps(tweet)
            print(tweetJson)
            jsonData = self.extractTweet(tweetJson)
            if jsonData is not None:
                ori_tweet = jsonData['text']
                replaced_tweet, translated_tweet = wordFormat.translateTweet(
                    jsonData['text'])
                jsonData['text'] = translated_tweet
                # Official transit accounts trigger the default push
                # notification; everything else goes to the DEBUG topic.
                if jsonData['id_str'] in ('3087502272', '307781209'):
                    fcm.send_default(jsonData)
                    print("Send default notification")
                else:
                    fcm.send_topic(jsonData, 'DEBUG')
                    print("Send debug notification")
                exportSheet.writeToSheet(jsonData['timestamp_ms'], ori_tweet,
                                         replaced_tweet, translated_tweet)
            print("Waiting for twitter msg...\n")
        except Exception as e:
            # Keep the stream alive on per-tweet failures.
            print("Error in tweet stream: {}".format(e))
def main():
    """Dump every tweet tagged #EC2014 into its own <id_str>.json file."""
    # TODO: receive the API keys as parameters instead of hardcoding them.
    auth_creds = OAuth(
        '2288882125-fEUDrrww3Bo2r96Ub18jZVCiARmKHZl8giFuEmI',
        'k89v5bOPu0ZsovQBz0nFoEgSl11C3lC3blMLnptfNOqpp',
        'CV4n05WmlUa94jas5BZkXrb7X',
        'gAjMkUjyVAo3ahIDhDYBQRisxgOWDPMRsAC74yH9S9a4iLMc75')
    connection = TwitterStream('stream.twitter.com', auth=auth_creds)
    for status in connection.statuses.filter(track='#EC2014'):
        with open(status['id_str'] + '.json', mode='w') as out:
            out.write(dumps(status))