Example #1
def main():
    '''Main entry method to program'''

    # Get the arguments from the command-line
    auth = parse_args().auth
    update = parse_args().update
    search = parse_args().search
    #dump = parse_args().dump

    if update:
        # Update Twitter status
        print "About to update Twitter status..."
        auth = authenticate()
        update = ' '.join(update)
        response = status_update(auth, update)
        print "Twitter response: %s" % response
    elif auth:
        # Authorise ctc with Twitter account
        get_access_tokens()
    elif search:
        # Stream a search
        listener = StdOutListener()
        auth = get_oauth_handle()
        stream = Stream(auth, listener)
        try:
            stream.filter(track=[search])
        except KeyboardInterrupt as ex:
            print "Aborting stream..."
Example #2
class Streamer( object ):

	def __init__( self, queue, terms=[], consumer=None, consumer_secret=None,
			token=None, secret=None):

		if consumer == None or consumer_secret == None or token == None or secret == None:
			config = configparser.ConfigParser()
			config.readfp( open( os.path.expanduser( "~/.slackTwitter" ) ) )

			consumer = config.get( "twitter", "consumer" )
			consumer_secret = config.get( "twitter", "consumer_secret" )
			token = config.get( "twitter", "token" )
			secret = config.get( "twitter", "secret" )

		auth = OAuthHandler( consumer, consumer_secret )
		auth.set_access_token( token, secret )

		listener = StreamerListener()
		self.stream = TweepyStream( auth=auth, listener=listener )

		self._queue = queue
		self._terms = terms
	
	def start( self ):
		self.stream.filter( track=self._terms )
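
Example #2 falls back to an INI-style credentials file at ~/.slackTwitter when no keys are passed in. Judging from the config.get() calls above, the expected layout is roughly:

[twitter]
consumer = <consumer key>
consumer_secret = <consumer secret>
token = <access token>
secret = <access token secret>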
Example #3
def stream_twitter(battle_id):
    #Avoiding circular import
    from battle.models import Battle

    battle = Battle.objects.get(id=battle_id)
    if battle.end_time < timezone.now():
        return

    battle.battlehashtags_set.update(typos=0, words=0)
    battle_hashtags = battle.battlehashtags_set.all().prefetch_related('hashtag')
    if battle_hashtags.count() == 0:
        return

    hashtag_values = [x.hashtag.value for x in battle_hashtags]

    listener = TwitterStreamListener(battle_hashtags)
    auth = OAuthHandler(
        settings.TWITTER_CONSUMER_KEY,
        settings.TWITTER_CONSUMER_SECRET
    )

    auth.set_access_token(
        settings.TWITTER_ACCESS_TOKEN,
        settings.TWITTER_ACCESS_TOKEN_SECRET
    )

    stream = Stream(auth, listener)

    delay = battle.end_time - timezone.now()
    Timer(delay.total_seconds(), stream.disconnect).start()

    stream.filter(track=hashtag_values, languages=['en'])
Example #4
def main():
	args = ArgParser.parse()
	#print 'credentials filename : ', args.credentials
	if args.credentials != None:

		print '********* Retrieve credentials ****************'
		credentials = CredentialsReader.read(args.credentials)
		consumer_key = credentials['consumer_key']
		consumer_secret = credentials['consumer_secret']
		access_token = credentials['access_token']
		access_token_secret = credentials['access_token_secret']

		print '********* Twitter Authentication ***********'
		# Twitter authentication
		auth = OAuthHandler(consumer_key, consumer_secret)
		auth.set_access_token(access_token, access_token_secret)

		ol = OutputListener() # instance of OutputListener
		stream = Stream(auth=auth, listener=ol)

		# Start a stream
		print '************** Start a stream *****************'
		if args.kw_file != None and len(args.args_list) == 0:
			keywords = []
			with open (os.path.join(os.getcwd(), args.kw_file), 'r') as f:
				keywords = f.readlines()
			keywords = [x.strip('\n') for x in keywords]
			#print 'keywords from file: ', keywords
			stream.filter(track=keywords)
		elif args.kw_file is None and len(args.args_list) > 0:
			#print'args list:',  args.args_list
			stream.filter(track=args.args_list)
		else:
			print 'Cannot use both options at the same time'
    def run(self):
        l = StdOutListener(self)
        auth = OAuthHandler(consumer_key, consumer_secret)
        auth.set_access_token(access_token, access_token_secret)

        stream = Stream(auth, l)
        stream.filter(track=self.tags, languages=['en'])
    def handle(self, *args, **options):
        
        politicians = Politician.objects.all();

        politician_keywords = []
        for politician in politicians:
            politician_keywords.append(politician.first_name + " " + politician.last_name)
            if politician.twitter_url:
                indexSlash = politician.twitter_url.rfind("/")
                indexQuestionMark = politician.twitter_url.rfind("?")
                if indexQuestionMark != -1:
                    twitter = politician.twitter_url[indexSlash+1:indexQuestionMark]
                else:
                    twitter = politician.twitter_url[indexSlash+1:]
                politician_keywords.append(twitter)
        
        # create instance of the tweepy tweet stream listener
        listener = TweetStreamListener()

        # set twitter keys/tokens
        auth = OAuthHandler(consumer_key, consumer_secret)
        auth.set_access_token(access_token, access_token_secret)

        # create instance of the tweepy stream
        stream = Stream(auth, listener)


        # search twitter for "congress" keyword
        stream.filter(track=politician_keywords)
        
Example #7
    def __init__(self,slacker):

        # auth
        auth = OAuthHandler(settings.twitter_consumer_key, settings.twitter_consumer_secret)
        auth.set_access_token(settings.twitter_access_token, settings.twitter_access_token_secret)

        # out
        l = StdOutListener(slacker)

        # stream
        stream = Stream(auth, l)
        print("opening twitter stream")
        # if only a certain list
        if FILTER_LIST:
            api = API(auth)
            employees = api.list_members(LIST_USER,LIST)
            member_ids = [str(val) for val in employees.ids()]
            #print(member_ids)
            print("only List: "+LIST)
            stream.filter(follow=member_ids)
        elif FILTER_KEYWORDS:
            print("only Keywords: "+str(KEYWORDS))
            stream.filter(track=KEYWORDS)
        else:
            print("your timeline")
            stream.userstream()
def stream(buff, terms):
    l = StdOutListener(buff)
    auth = OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)

    stream = Stream(auth, l)
    stream.filter(track=[terms])
Example #9
def main():
    l = StdOutListener()
    auth = OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    backend = FileBackend("./test-db")
    stream = Stream(auth, l)
    stream.filter(track=['トレクル'])
def run_twitter_query():
    l = StdOutListener()
    auth = OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    stream = Stream(auth, l)
    #names = list(np.array(get_companies())[:,1])
    #print names[num1:num2]
    d = hand_made_list()
    search_list = []
    for key, value in d.items():
        if key == 'SPY':
            search_list.append(value[0]) # append the full name of the symbol
            search_list.append('#SP500') # Don't append #SPY because it's not helpful
            search_list.append('$SP500')
        elif key == 'F':
            # search_list.append(value[0]) # append the full name of the symbol
            search_list.append('Ford') # append the name of the symbol
        elif key == 'GE':
            search_list.append('General Electric') # append the full name of the symbol
        elif key == 'S':
            search_list.append('Sprint') # append the full name of the symbol
        elif key == 'T':
            search_list.append('AT&T') # append the full name of the symbol
        elif key == 'MU':
            search_list.append('Micron Tech') # append the full name of the symbol
        elif key == 'TRI':
            search_list.append('Thomson Reuters') # append the full name of the symbol
        else:
            for cell in value:
                search_list.append(cell)

    stream.filter(track=search_list)
def TwitterStream(kwords, lim, lang=['en'], loca=[-180, -90, 180, 90]):
    # print kwords, lang, lim, loca
    global limit
    if type(lim) != tuple:
        l = StdOutListener()
        limit = int(lim)
    else:
        day = int(lim[0])
        hour = int(lim[1])
        minute = int(lim[2])
        second = int(lim[3])
        l = StdOutListener_time()
        print time.time()
        limit = time.time() + 86400 * day + 3600 * \
            hour + 60 * minute + 1 * second
        print limit

    auth = OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)

    global results
    results = list()
    stream = Stream(auth, l)

    # print kwords, lang
    # NOTE: the loca bounding box argument is accepted but never passed to filter()
    stream.filter(track=kwords, languages=lang)
    # def filter(self, follow=None, track=None, async=False, locations=None,
    #            stall_warnings=False, languages=None, encoding='utf8'):
    return results
Example #12
def main():
    auth = OAuthHandler(ckey, csecret)
    auth.set_access_token(atoken, asecret)
    twitterStream = Stream(auth, listener())
    if not file_exist('db_tweet.csv'):
        cabecalho('db_tweet.csv')
    twitterStream.filter(locations=[-46.825390,-24.008381,-46.364830,-23.357611])
def minetweets():
    line = StdOutListener()
    auth = OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    stream = Stream(auth, line)
    # stream.filter(track=['Watson', 'Cognitive', 'Machine Learning'])
    stream.filter(track=args, languages=["en"])
Example #14
    def get_tweet_stream(self, track, game_id, game_name):
        index = self.get_auth()
        self.logger.info('Using auth: ' + str(self.auth.consumer_key))
        self.set_paths(track, game_id, game_name)
        stream = Stream(self.auth, self)
        stream.filter(track=[track], async=True)
        return stream, index
    def startListen(self):
        auth = OAuthHandler(consumer_key, consumer_secret)
        auth.set_access_token(access_token, access_token_secret)
 
        stream = Stream(auth, self)
#     stream.filter(track=[positiveEmoticons])
        stream.filter(locations = [113.90625,-43.665217,157.148438,-13.35399])
def mainloop():
    # Authenticate OAuth
    auth = OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
    auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)

    # Setup our API.
    api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)
    if not api:
        print ("Can't authenticate")
        sys.exit(-1)

    # Iterate through all the usernames of the followers we want to follow
    # and extract their ID to be placed into an array of ID's our bot will
    # follow and automatically re-tweet.
    followers_array = []
    for screen_name in RETWEET_FOLLOWERS:
        profile = api.get_user(screen_name)
        followers_array.append(str(profile.id))

    for screen_name in LIKE_FOLLOWERS:
        profile = api.get_user(screen_name)
        followers_array.append(str(profile.id))

    # Run the streamer.
    stream = Stream(auth = api.auth, listener=ListenerAndRetweeter(api))
    stream.filter(follow=followers_array,track=[])
def streaming_tweets(keywords, language=["en"]):
    """
    @keywords   ==  search keywords, e.g. ["ImWithHer", "Trump"]
    @languages  ==  desired language, e.g.: ["en"]

    """

    filename = keywords[0].replace(" ", "").replace("#", "") + ".csv"
    print(filename)

    try:
        start_csv()
        while True:
            try:
                #Initialize Tweepy Streamer
                twitterStream = Stream(auth, TwitterListener())
                twitterStream.filter(track=keywords, languages=language)
            except Exception as error:
                print(error)
                print("[*] saving results to {}".format(filename))
                os.rename("streaming_results.csv", filename)

    except KeyboardInterrupt:
        print("[*] User KeyboardInterrupt: Tweepy Streamer Haulted")
        print("[*] saving results to {}".format(filename))
        os.rename("streaming_results.csv", filename)
    def get_tweets(cls, keyword):

        global tweet_file

        # get the auth
        auth = cls.get_auth()
        # define the listener
        listener = StdOutListener()
        # define stream object
        stream = Stream(auth, listener)

        # define the api object
        api = tweepy.API(auth)

        current_milli_time = str(int(round(time.time() * 1000)))
        # open a file to write tweets
        tweet_file = open(keyword+'_'+current_milli_time+'.txt', 'a')

        try:
            # get past tweets, max 500
            result = tweepy.Cursor(api.search, q=keyword).items(10)
            for tweet in result:
                tweet_file.write(tweet.text.encode("UTF-8"))
                tweet_file.write('\n')
                #pprint(tweet)

            # Close the file
            # tweet_file.close()

            # run live feeds
            stream.filter(track=[keyword])
        except Exception as ex:
            print(ex.message, ex)
        finally:
            tweet_file.close()
class TweetController:
    """docstring for Controller"""

    def __init__(self):
        self.settings = Settings()
        # self.auth = OAuthHandler(Listener.api_data["consumer_key"], Listener.api_data["consumer_secret"])
        # self.auth.set_access_token(Listener.api_data["access_token"], Listener.api_data["access_token_secret"])
        self.api = tweepy.API(self.settings.auth, parser=tweepy.parsers.JSONParser())

        self.db = DataBase()
        # self.tweet_gui = tweet_gui
        self.default_keyword = ['Obama', 'hillary ', 'Trump']
        self.db.create_table_if_not_exist()

    def start_stream(self):
        self.tweet_listener = Listener()
        self.stream = Stream(auth=self.settings.auth, listener=self.tweet_listener)
        self.stream.filter(track=self.default_keyword, async=True)

    def stop_stream(self):
        self.stream.disconnect()

    def set_keyword(self, default_keyword):
        self.default_keyword = default_keyword
        print(default_keyword)
class StreamConsumerThreadClass(threading.Thread):
    def __init__(self,term='',oauthfile=''):
        threading.Thread.__init__(self)
        self.searchterm = term
        self.name = term
        self.consume = True
        
        oauth = json.loads(open(oauthfile,'r').read())
        
        listener = MongoDBListener()
        auth = OAuthHandler(oauth['consumer_key'], oauth['consumer_secret'])
        auth.set_access_token(oauth['access_token'], oauth['access_token_secret'])
    
        self.stream = Stream(auth, listener,timeout=60)  
        
        
    def stopConsume(self):
        self.stream.disconnect()
      
    def run(self):
        now = datetime.datetime.now()
        print "Twitter Stream with terms: %s started at: %s" % (self.getName(), now)
        
        connected = False
        while True:
            try: 
                if not connected:
                    connected = True
                    self.stream.filter(track=[self.searchterm])
            except SSLError, e:
                print e
                connected = False            
def main(argv):
    query = ""
    try:
        opts, args = getopt.getopt(argv, "hq:s:w:i:", ["help", "query="])
    except getopt.GetoptError:
        print ("stream.py --query <query>")
        sys.exit(2)

    for opt, arg in opts:
        if opt in ("-h", "--help"):
            print ("stream.py --query <query>")
            sys.exit()
        elif opt in ("-q", "--query"):
            query = arg


    array = []
    with open("keys.txt", "r") as ins:
        for line in ins:
            array.append(line.rstrip('\n'))
    l = StdOutListener()
    auth = OAuthHandler(array[0], array[1])
    auth.set_access_token(array[2], array[3])

    stream = Stream(auth, l)
    stream.filter(track=[query])
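
This example reads its credentials from a keys.txt file with one value per line; based on the indices used above (array[0] through array[3]), the expected order is:

<consumer key>
<consumer secret>
<access token>
<access token secret>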
Example #22
def run(track_list):
    listener = StdOutListener()
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
     
    stream = Stream(auth, listener)
    stream.filter(track=track_list)
def setup_streams(auth):
    twitter_list = WBListener()
    #twitter_list2 = WBListener()
    global stream_obj

    stream_obj = Stream(auth, twitter_list)
    stream_obj.filter(track=['#trump'], async=True)
class TweetFetcher(StreamListener):
    def __init__(self, query, tweetstore, max_tweets=1000):
        super(TweetFetcher, self).__init__()
        self.tweet_store = tweetstore
        self._max_tweets = max_tweets
        self._query_terms = query.split()
        self._tweets = []
        self._count = 0
        self._alive = True
        auth = OAuthHandler(consumer_key, consumer_secret)
        auth.set_access_token(access_token, access_token_secret)

        self._stream = Stream(auth, self)
        self._stream.filter(track=self._query_terms, async=True)

    def on_data(self, data):
        if self._count < self._max_tweets and self._alive:
            tweet = json.loads(data)
            if tweet['lang'] == 'en':
                self.tweet_store.add_tweet(
                    {'guid': self._count, 'id': tweet['id'], 'text': tweet['text'], 'query': self._query_terms})

            self._count += 1
            return True
        else:
            print 'Reached tweet limit ... shutdown'
            return False

    def on_error(self, status):
        print 'Stream Error'
        print status
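
A hypothetical way to drive TweetFetcher, assuming a minimal store object exposing the add_tweet() method the listener calls:

class ListStore(object):
    '''Stand-in for the tweetstore argument expected by TweetFetcher'''
    def __init__(self):
        self.tweets = []

    def add_tweet(self, tweet):
        self.tweets.append(tweet)

store = ListStore()
fetcher = TweetFetcher("machine learning", store, max_tweets=50)  # streams in the background (async=True)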
Example #25
def main():
#    dirname = os.path.dirname(inspect.getfile(inspect.currentframe()))
#    basename = os.path.basename(inspect.getfile(inspect.currentframe()))
    dirname = os.path.dirname(os.path.realpath(__file__))
    basename = os.path.basename(os.path.realpath(__file__))
    name_noextension = os.path.splitext(basename)[0]
    """Start log."""
    configinput = __import__("config" + name_noextension)
    outputDir = os.path.join(dirname, configinput.directory)
    if not os.path.exists(outputDir):
        os.makedirs(outputDir)
    logfilename = os.path.join(outputDir,basename) + ".log"
    logging.basicConfig(filename=logfilename,level=logging.DEBUG, format='%(asctime)s %(message)s')
    logging.info('Started')
    save_pid()


    """Execute the twitter api."""
    try:
        auth = OAuthHandler(configinput.consumer_key, configinput.consumer_secret)
        auth.set_access_token(configinput.access_token, configinput.access_secret)
        twitter_stream = Stream(auth, MyListener(os.path.join(dirname, configinput.directory), os.path.join(dirname, configinput.to_dir), basename))   # the second argument is the name of the JSON file
        twitter_stream.filter(track=configinput.keyword_list_filter)
    except BaseException as e:
        logging.error('Failed to execute twitter api: ' + str(e))    
    
    logging.info('Finished')
Example #26
def start(args):

    if args["server"]:
        api.Run(nlp, userin, args["server"])
        if Config.TWITTER_CONSUMER_KEY != 'CONSUMER_KEY':
            auth = OAuthHandler(Config.TWITTER_CONSUMER_KEY, Config.TWITTER_CONSUMER_SECRET)
            auth.set_access_token(Config.TWITTER_ACCESS_KEY, Config.TWITTER_ACCESS_SECRET)
            userin.twitter_api = tweepy.API(auth)

            print("Listening Twitter mentions...")
            l = MentionListener(args)
            stream = Stream(auth, l)
            stream.filter(track=['DragonfireAI'], async=True)
    elif args["cli"]:
        while (True):
            com = raw_input("Enter your command: ")
            thread.start_new_thread(VirtualAssistant.command, (com, args))
            time.sleep(0.5)
    elif args["gspeech"]:
        from dragonfire.sr.gspeech import GspeechRecognizer
        recognizer = GspeechRecognizer()
        recognizer.recognize(args)
    else:
        from dragonfire.sr.deepspeech import DeepSpeechRecognizer
        recognizer = DeepSpeechRecognizer()
        recognizer.recognize(args)
def streamTweets(geobox, includeString, excludeString):
    '''Takes as inputs:
            geobox (latitudinal and longitudinal boundaries), a list of the form
            [W, S, E, N]
            includeString, terms to include, separated by line breaks
            excludeString, terms to exclude, separated by line breaks
    And then returns a stream of tweets, filtered by those inputs'''

    # gets term lists
    includeTerms = parseTerms(includeString)
    excludeTerms = parseTerms(excludeString)

    # USING TWITTER'S STREAMING API

    consumerKey = '****'
    consumerSecret = '****'

    # lets the app access twitter on behalf of my account
    accessToken = '****'
    accessSecret = '****'

    auth = OAuthHandler(consumerKey, consumerSecret)
    auth.set_access_token(accessToken, accessSecret)
     
    api = tweepy.API(auth)

    myListener = TwitterListener(includeTerms, excludeTerms)
    twitterStream = Stream(auth, myListener)
    twitterStream.filter(locations=geobox)

    return myListener.tweets
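
A hypothetical call to streamTweets, using a [W, S, E, N] bounding box and newline-separated term strings as described in the docstring:

bay_area = [-122.75, 36.8, -121.75, 37.8]
include = "earthquake\ntremor\nshaking"
exclude = "giveaway\npromo"
tweets = streamTweets(bay_area, include, exclude)  # blocks until the stream disconnects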
Example #28
    def run(self):
        global streamobj
        streamobj = Stream(self.auth, self.listener)

        #LOCATION OF USA = [-124.85, 24.39, -66.88, 49.38]; filter tweets that come from the USA and are written in English
        streamobj.filter(locations = [-124.85, 24.39, -66.88, 49.38,], languages=['en'])
        return
    def storetwitterstream(self, destinationfile, query, lang=["fr"], starttime=time.time(), timelimit=60):

        class MyListener(StreamListener):
            def __init__(self, destinationfile, starttime, timelimit):
                self.outfile = self.TwitterApiUtil.db_location+destinationfile+".json"
                self.starttime=starttime
                self.timelimit=timelimit

            def on_data(self, data):
                while (time.time()-self.starttime)<self.timelimit:
                    try:
                        with open(self.outfile, 'a') as f:
                            f.write(data)
                            print(data)
                            return(True)
                    except BaseException as e:
                        print("Error on_data: %s" % str(e))
                        time.sleep(5)
                        pass
                    return(True)
                else: return(False)

            def on_error(self, status):
                print(status)
                return(True)

        twitter_stream = Stream(self.get_current_api().auth, MyListener(destinationfile,starttime,timelimit))
        twitter_stream.filter(track=query,languages=lang)
def stream_users(in_file, auth):    
    screen_names = [x.strip() for x in in_file]
    
    tw_list = user_ids(screen_names, auth)
    
    twitterStream = Stream(auth, Listener())
    twitterStream.filter(follow=tw_list)
Example #31
                point = 25
            elif p == "50hp":
                point = 50
        if point > 0:
            user = d["user"]["screen_name"]
            userdata = {
                "name": d["user"]["screen_name"],
                "fullname": d["user"]["name"],
                "pic": d["user"]["profile_image_url"],
                "src": "twitter"
            }
            print("Adding new score by", userdata)
            redis.sadd(user, json.dumps(userdata))
            redis.zincrby("leaderboard", user, point)
        return True

    def on_error(self, status):
        print(status)


if __name__ == '__main__':

    #This handles Twitter authentication and the connection to the Twitter Streaming API
    l = StdOutListener()
    auth = OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    stream = Stream(auth, l)

    #This line filters the Twitter stream to capture tweets containing the hashtag '#iifutmaningen'
    stream.filter(track=['#iifutmaningen'])
Example #32
            dictTweet["_id"] = str(dictTweet['id'])
            doc = db.save(dictTweet)
            print("SAVED" + str(doc) + "=>" + str(data))
        except:
            print("Already exists")
            pass
        return True

    def on_error(self, status):
        print(status)


auth = OAuthHandler(ckey, csecret)
auth.set_access_token(atoken, asecret)
twitterStream = Stream(auth, listener())
'''========couchdb=========='''
server = couchdb.Server('http://*****:*****@localhost:5984/'
                        )  #('http://115.146.93.184:5984/')
try:
    db = server.create('esmeraldasaa')
except:
    db = server['esmeraldasaa']
'''===============LOCATIONS=============='''

#Filter by geolocation
twitterStream.filter(locations=[-80.1029, 0.0618, -78.4267, 1.4696])
#Filter by keywords (note: filter() blocks, so this second call only runs after the first stream disconnects)
twitterStream.filter(
    track=['Elecciones Ecuador 2021', 'Andres Arauz', 'CENTRO'])
Example #33
        self.message_win.refresh()
        self.clear_message_win()

        # store in csv
        if overall % tweets_per_period == 0:
            self.index = datetime.datetime.now()

        self.df.set_value(self.index, self.header[0], key_word)
        self.df.set_value(self.index, self.header[1], overall)
        self.df.set_value(self.index, self.header[2], duration / 60)
        self.df.set_value(self.index, self.header[3], overall / duration)
        self.df.set_value(self.index, self.header[4], self.neg_count)
        self.df.set_value(self.index, self.header[5], neg_percent)
        self.df.set_value(self.index, self.header[6], self.pos_count)
        self.df.set_value(self.index, self.header[7], pos_percent)
        self.df.set_value(self.index, self.header[8], self.neu_count)
        self.df.set_value(self.index, self.header[9], neu_percent)

        self.df.to_csv(log_file)

        return True


try:
    twitter_stream = Stream(auth, MyListener())
    twitter_stream.filter(track=[key_word])
except KeyboardInterrupt as i:
    curses.endwin()
except Exception as e:
    print('Exception: ' + str(e))
Example #34
    def stream_tweets(self, fetched_tweet_filename, hash_tag_list):
        # twitter authentication and the connection to the twitter streaming api
        listener = TwitterListener(fetched_tweet_filename)
        auth = self.twitter_authenticator.authenticate_twitter_app()
        stream = Stream(auth, listener)
        stream.filter(track=hash_tag_list)
Example #35
class StdOutListener(StreamListener):
    """ A listener handles tweets that are received from the stream.
    This is a basic listener that just prints received tweets to stdout.
    """
    def on_data(self, data):
        try:
            decoded = json.loads(data)
            date_tweet, text = '', ''
            if decoded['created_at']:
                date_tweet = parser.parse(decoded['created_at'])
            if decoded['text']:
                text = decoded['text']
        except:
            pass
        if date_tweet and text:
            Tweet(content=text, date=date_tweet).save()
        return True

    def on_error(self, status):
        pass


if __name__ == '__main__':
    Tweet.drop_collection()
    l = StdOutListener()
    auth = OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    stream = Stream(auth, l)
    stream.filter(track=keywords)
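
Example #35 stores tweets through a Tweet document class that is not shown. Assuming MongoEngine (a guess based on the .save() and drop_collection() calls), the model could be as small as:

from mongoengine import Document, StringField, DateTimeField

class Tweet(Document):
    '''Hypothetical MongoEngine model matching the fields used above'''
    content = StringField(required=True)
    date = DateTimeField(required=True)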
        except BaseException as e:
            print("Error on_data: %s" % str(e))
        return True

    def on_error(self, status):
        print(status)
        return True


#filter the tweets
if __name__ == '__main__':

    data_list = []
    NUM_OF_TWEETS = {'count': 0, 'seen': 0}

    access_token = ""
    access_secret = ""
    consumer_key = ""
    consumer_secret = ""

    auth = OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_secret)

    api = tweepy.API(auth)

    # Status() is the data model for a tweet
    tweepy.models.Status.first_parse = tweepy.models.Status.parse
    tweepy.models.Status.parse = parse
    twitter_stream = Stream(auth, MyListener())
    twitter_stream.filter(locations=LOCATION_UK, languages=["en"])
            # Before saving the document you can parse, clean, and do some
            # preliminary analysis or filtering of the data before storing it
            # in the database
            doc = db.save(
                dictTweet)  # The tweet is saved to the couchDB database here
            print("Saved " + "=> " + dictTweet["_id"])
        except:
            print("Document already exists")
            pass
        return True

    def on_error(self, status):
        print(status)


auth = OAuthHandler(ckey, csecret)
auth.set_access_token(atoken, asecret)
twitterStream = Stream(auth, listener())

#Set the URL of the couchDB server
server = couchdb.Server('http://localhost:5984/')
try:
    #Create the database if it does not exist
    db = server.create('futbol')
except:
    #Otherwise just connect to the existing database
    db = server['futbol']

#The bounding box with the geographic limits for collecting tweets is defined here
#twitterStream.filter(locations=[-92.21,-5.02,-75.19,1.88])
twitterStream.filter(track=['futbol', 'soccer'])
Example #38
consumer_secret = twitter_consumer_secret


#This prints received tweets to stdout.
class Listener(StreamListener):
    def on_data(self, data):
        print(data)
        return True

    def on_error(self, status):
        print(status)


if __name__ == '__main__':

    # Twitter authentication and connection to the Twitter Streaming API
    l = Listener()
    auth = OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    stream = Stream(auth, l)

    # Filter Twitter streams by specified keywords
    stream.filter(track=['climate change','global warming','#climatechange', '#globalwarming', \
                         '#climate','#climatestrike','#climatebrawl','#climatecrisis', \
                         '#climatehoax','#climateemergency','#climatescam','#climatecult', \
                         '#climateaction','#climatechangeisreal','#nolifewithoutco2','#fossilfuels', \
                         '#co2gasoflife','#gretathunberg','#lifewantsmoreco2','#ipcc', \
                         '#greennewdeal','#actonclimate','#climateaction','#fridaysforfuture', \
                         '#parisagreement','#climatedebate','#sustainability', \
                         '#climatetownhall','#climatemarch','#earthday','#climatehope', \
                         '#climatejustice'])
from tweepy import OAuthHandler
from tweepy import Stream
import apiconfig as config
import sys
from listner import StdOutListener






access_token=config.access_token
access_token_secret=config.token_secret
consumer_key=config.api_key
consumer_secret=config.api_secret

if __name__== '__main__':
    args=sys.argv
    del args[0]
    l=StdOutListener()
    auth = OAuthHandler(consumer_key,consumer_secret)
    auth.set_access_token(access_token,access_token_secret)
    stream = Stream(auth,l)


    stream.filter(track=args)
Example #40
        if "jingle" in command:
            command = "jingle"

        if lastpat != command:
            client.publish("ChristmasLights", command)
            #    		print command
            lastpat = command

        return True

    def on_error(self, status):
        #        print(status)
        client.publish("ChristmasLights/log", bytearray(status))


def on_connect(client, userdata, flags, rc):
    client.subscribe("ChristmasLights")


client = mqtt.Client()
client.on_connect = on_connect
client.connect("winter.ceit.uq.edu.au", 1883, 60)
client.loop_start()

l = StdOutListener()
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_KEY, ACCESS_SECRET)

stream = Stream(auth, l)
stream.filter(track=['TheThirstyPlant'])
print('user\'s name: ' + user.name)
print('location: ' + user.location)
print('number of friends: ' + str(user.friends_count))

from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream


#This is a basic listener that just prints received tweets to stdout.
class StdOutListener(StreamListener):
    def on_data(self, data):
        print(data)
        return True

    def on_error(self, status):
        print(status)


if __name__ == '__main__':

    #This handles Twitter authentication and the connection to the Twitter Streaming API
    l = StdOutListener()
    auth = OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    stream = Stream(auth, l)

    #This line filters the Twitter stream to capture tweets containing the keyword 'python'
    stream.filter(track=['python'])
Example #42
from tweepy import Stream
from tweepy import OAuthHandler
from tweepy.streaming import StreamListener

# Consumer key, consumer secret, access token, access secret
ckey = "KBhEbR7mtJFiAHbZg9hEw0Udk"
csecret = "oTXaRZJtJe2gs9v0sT0X1d641lJukcvu8HWDCHoxAFBTNz5kgD"
atoken = "971808625122533378-E5G9EvNrIdyCjDEGGOV4qHO5J7Ca1ZY"
asecret = "j8oyDotC6LZcmW9XUoXs3qAwhqIbi2IxjACHuQuDkgrbW"


class MyListener(StreamListener):
    def on_data(self, data):
        print(data)
        return (True)

    def on_error(self, status):
        print(status)


auth = OAuthHandler(ckey, csecret)
auth.set_access_token(atoken, asecret)

twitterStream = Stream(auth, MyListener())
twitterStream.filter(track=["BuenJueves"])
            "Guardado " + "=> " + dictTweet["_id"]
        except:
            print
            "Documento ya existe"
            pass
        return True

    def on_error(self, status):
        print(status)


auth = OAuthHandler(ckey, csecret)
auth.set_access_token(atoken, asecret)
twitterStream = Stream(auth, listener())

# Set the URL of the couchDB server
server = couchdb.Server('http://localhost:5984/')
try:
    # Create the database if it does not exist
    db = server.create('guayaquil')
except:
    # Otherwise just connect to the existing database
    db = server['guayaquil']

# The bounding box with the geographic limits for collecting tweets is defined here
# Note: filter() blocks, so only the first of the two calls below actually runs
twitterStream.filter(track=["guayaquil","Jimmy Jairala","#JimmyJairala","Martha Macias","#MarthaMacias","Cynthia Viteri ", "#CynthiaViteri", "Patricio Buendia","#PatricioBuendia","Eduardo Argudo ","#EduardoArgudo","elecciones de alcaldia guayaquil"])
twitterStream.filter(locations=[-79.95912,-2.287573,-79.856351,-2.053362])


Example #44
# In the code below, 'output = open("twitter-out.txt", "a")' and the lines that follow append each tweet's sentiment value to a text file whenever the classifier's confidence is at least 80%

class listener(StreamListener):
    def on_data(self, data):
        all_data = json.loads(data)

        tweet = all_data["text"]
        sentiment_value, confidence = s.sentiment(tweet)
        print(tweet, sentiment_value, confidence)

        if confidence * 100 >= 80:
            output = open("twitter-out.txt", "a")
            output.write(sentiment_value)
            output.write('\n')
            output.close()

        return True

    def on_error(self, status):
        print(status)

# OAuth process, using the keys and tokens

auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_secret)
twitterStream = Stream(auth, listener())
twitterStream.filter(track=["justice league"])

# The stream retrieves all the live tweets about your chosen query, in this case Justice League.

# Reference for the code used: https://pythonprogramming.net/twitter-sentiment-analysis-nltk-tutorial/
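
The s.sentiment() helper comes from the linked pythonprogramming.net tutorial (a custom voted classifier) and is not reproduced here. Purely as an assumed stand-in, NLTK's VADER analyzer can supply a similar (label, confidence) pair:

# Assumed stand-in for the tutorial's sentiment module; not the original code.
# Requires: nltk.download('vader_lexicon')
from nltk.sentiment.vader import SentimentIntensityAnalyzer

_sia = SentimentIntensityAnalyzer()

def sentiment(text):
    scores = _sia.polarity_scores(text)
    label = 'pos' if scores['compound'] >= 0 else 'neg'
    confidence = abs(scores['compound'])  # 0.0-1.0, a crude proxy for classifier confidence
    return label, confidence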
Example #45
    if image:
        twitter.update_with_media(image,
                                  '@{} {}'.format(str(status.user.screen_name),
                                                  response),
                                  in_reply_to_status_id=status.id)
        os.remove(image)
    else:
        twitter.update_status('@{} {}'.format(str(status.user.screen_name),
                                              response),
                              in_reply_to_status_id=status.id)
    log.info("Responded with: {}".format(response))


class StdOutListener(StreamListener):
    def on_status(self, status):
        if marta_mentioned(status):
            response, image = process_tweet(status)
            log.info("Replying to this tweet from @{}: '{}'".format(
                status.user.screen_name, status.text))
            log.info('Replying with: {}'.format(response))
            respond(status, response, image)
        return True

    def on_error(self, status_code):
        log.error("Error, code" + str(status_code))


l = StdOutListener()
stream = Stream(auth, l)
stream.filter(track=['martatimes'])
Example #46
                    final_string = prestring.encode("UTF-8", 'ignore')
                    result = '{"text":"' + data[
                        'text'] + '","created_at":"' + data[
                            'created_at'] + '",' + '"coordinates":"[' + str(
                                data['coordinates']['coordinates'][0]
                            ) + ',' + str(
                                data['coordinates']['coordinates'][1]) + ']"},'
                    final_result = emoji_pattern.sub(r'', result)
                    self.outFile.write(final_result)
                    self.outFile.write('\n')
                    self.outFile.flush()
                    return True
        else:
            self.outFile.close()
            return False

    def on_error(self, status):
        print(status)


auth = OAuthHandler(C_KEY, C_SECRET)
auth.set_access_token(A_TOKEN_KEY, A_TOKEN_SECRET)
myStream = Stream(auth, MyListener(time_limit=3196800))
try:
    myStream.filter(track=[
        'wild fire', 'earthquake', 'tornado', 'flood',
        'volcanic eruption', 'thunderstorm', 'hailstorm'
    ])
except:
    pass
Example #47
ckey = ' ' #your consumer key
csecret = ' ' #your consumer secret
atoken = ' ' #your access token
asecret = ' ' #your access secret

class listener(StreamListener):

    def on_data(self, data):
        try:
            #print data
            tweet = data.split(',"text":"')[1].split('","source')[0] #[1] as we want the right side not the left side of this split
            print tweet

            saveThis = str(time.time()) + '::' + tweet #prefix with unix time
            saveFile = open('twitDB2.txt','a')
            saveFile.write(saveThis)
            saveFile.write('\n')
            saveFile.close()
            return True
        except BaseException, e:
            print 'failed on data,', str(e)
            time.sleep(5)

    def on_error(self, status):
        print status

auth = OAuthHandler(ckey, csecret)
auth.set_access_token(atoken, asecret)
twitterStream = Stream(auth, listener())
twitterStream.filter(track=["text"]) #replace text with entity related to which tweets are to be extracted. In my case, I used "Narendra Modi"
Example #48
        print(status)


if __name__ == '__main__':
    # hash_tag_list = ["ottawa", "Canada"]
    fetched_tweets_file = "tweets.json"

    key_word = input("Enter a domain (general topic) to search by:\n")
    others = input(
        "Enter subcategories (e.g vulnerabilities) to search for [No spaces, separate by comma]: \n"
    )
    hash_tag_list = others.strip().split(",")
    key_words_list = [None] * len(hash_tag_list)
    for i in range(len(hash_tag_list)):
        key_words_list[i] = key_word + " " + hash_tag_list[i]
    print(key_words_list)

    kafka = KafkaClient("localhost:9092")
    producer = SimpleProducer(kafka)
    listener = StdOutListener(fetched_tweets_file)

    auth = OAuthHandler(twitter_credentials.CONSUMER_KEY,
                        twitter_credentials.CONSUMER_KEY_SECRET)
    auth.set_access_token(twitter_credentials.ACCESS_TOKEN,
                          twitter_credentials.ACCESS_TOKEN_SECRET)
    stream = Stream(auth, listener)
    stream.filter(track=key_words_list)

    # twitter_streamer = TwitterStreamer()
    # twitter_streamer.stream_tweets(fetched_tweets_file, hash_tag_list)
Example #49
# The consumer key and secret will be generated for you after
consumer_key = ""
consumer_secret = ""

# After the step above, you will be redirected to your app's page.
# Create an access token under the "Your access token" section
access_token = ""
access_token_secret = ""


class StdOutListener(StreamListener):
    """ A listener handles tweets that are received from the stream.
    This is a basic listener that just prints received tweets to stdout.

    """
    def on_data(self, data):
        print(data)
        return True

    def on_error(self, status):
        print(status)


if __name__ == '__main__':
    l = StdOutListener()
    auth = OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)

    stream = Stream(auth, l)
    stream.filter(track=['basketball'])
Example #50

def format_filename(fname):
    """ Convert fname into a safe string for a file name. 
			
			Return: string
		"""
    return ''.join(convert_valid(one_char) for one_char in fname)


def convert_valid(one_char):
    """
	Convert a character into '_' if "invalid".

	Return: string

	"""
    valid_chars = "-_.%s%s" % (string.ascii_letters, string.digits)
    if one_char in valid_chars:
        return one_char
    else:
        return '_'


if __name__ == '__main__':
    query = sys.argv[1:]  # list of CLI arguments
    query_fname = ' '.join(query)  # string
    auth = get_twitter_auth()
    twitter_stream = Stream(auth, CustomLsitener(query_fname))
    twitter_stream.filter(track=query, async=True)
def model_run(txt, path):
    p = os.path.join(path, 'fetched_tweets.txt')
    os.remove(p)
    access_token = "xxxxxx-xxxxx"
    access_token_secret = "xxxxx"
    consumer_key = "xxxxxx"
    consumer_secret = "xxxxxxxx"
    l = StdOutListener()
    auth = OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    stream = Stream(auth, l)
    tweets_data = []
    stream.filter(track=[txt], async=True)
    time.sleep(30)
    stream.disconnect()
    for line in open('fetched_tweets.txt', 'r'):
        try:
            tweet = json.loads(line)
            tweets_data.append(tweet)
        except:
            continue
    tweets = pd.DataFrame()
    tweets['text'] = list(map(lambda tweet: tweet['text'], tweets_data))
    tweets['lang'] = list(map(lambda tweet: tweet['lang'], tweets_data))
    tweets['country'] = list(
        map(
            lambda tweet: tweet['place']['country']
            if tweet['place'] != None else None, tweets_data))
    tweets['Source'] = list(map(lambda tweet: tweet['source'], tweets_data))
    tweets['Location'] = list(
        map(lambda tweet: tweet['user']['location'], tweets_data))
    tweets['Name'] = list(map(lambda tweet: tweet['user']['name'],
                              tweets_data))
    tweets['Screen_Name'] = list(
        map(lambda tweet: tweet['user']['screen_name'], tweets_data))
    tweets['URL'] = list(map(lambda tweet: tweet['user']['url'], tweets_data))
    tweets['Geo_enabled'] = list(
        map(lambda tweet: tweet['user']['geo_enabled'], tweets_data))
    tweets.to_csv('spring.csv', index=False)
    lang = pd.read_csv("dictionary of language.csv", encoding='ISO-8859-1')
    full = []
    nulls = []
    for la in tweets['lang']:
        flag = False
        for ind, row in lang.iterrows():
            if la == row['short_form']:
                flag = True
                full.append(row['full_form'])
        if flag == False:
            nulls.append(la)
            full.append("Null")
    tweets['lang'] = full
    sid = SentimentIntensityAnalyzer()
    stop_words = set(stopwords.words('english'))
    final = tweets
    neg, neu, pos, comp = [], [], [], []
    for text in final['text']:
        tokens = word_tokenize(text)
        cleaned = [x for x in tokens if x not in stop_words]
        cleaned = [x for x in cleaned if x.isalpha()]
        cleaned = " ".join(cleaned)
        ss = sid.polarity_scores(cleaned)
        neg.append(ss['neg'])
        neu.append(ss['neu'])
        pos.append(ss['pos'])
        comp.append(ss['compound'])
    final['Negative'] = neg
    final['Neutral'] = neu
    final['Positive'] = pos
    final['Compound'] = comp
    final.to_csv('final.csv')
    return final
def abcd(s):
    auth = OAuthHandler(ckey, csecret)
    auth.set_access_token(atoken, asecret)

    twitterStream = Stream(auth, listener())
    twitterStream.filter(track=[s])
Example #53
        self.tweet_data = []

    def on_data(self, data):
        saveFile = io.open('raw_tweets.json', 'a', encoding='utf-8')
        while (time.time() - self.time) < self.limit:
            try:
                self.tweet_data.append(data)
                return True
            except BaseException, e:
                print 'failed ondata', str(e)
                time.sleep(5)
                pass

        saveFile = io.open('raw_tweets.json', 'w', encoding='utf-8')
        saveFile.write(u'[\n')
        saveFile.write(','.join(self.tweet_data))
        saveFile.write(u'\n]')
        saveFile.close()
        exit()  # stop once the time limit has passed

    def on_error(self, status):
        print status


twitterStream = Stream(auth, listener(start_time, time_limit=20))  #initialize Stream object with a time-out limit
twitterStream.filter(track=keyword_list, languages=['en'])  #call the filter method to run the Stream
from tweepy import OAuthHandler
from tweepy import Stream
from tweepy.streaming import StreamListener

consumer_key = "1RS4q7iMO4nY4MYFHyCd7TNMf"
consumer_secret = "WGq0wOimsraVSM3gbrk7oQq0hBpITjQj0xfshwO1xfK32sTA5h"
access_token = "1248849817427730435-kLEU0iRcUAwLUQntoZai3A8dItbm2x"
access_token_secret = "74I2Al2mFJeQXolDYJSZrrV84m0EKJi42GehsROl6krMG"


class StdOutListener(StreamListener):
    def on_data(self, data):
        print(data)
        return True

    def on_error(self, status):
        print(status)


if __name__ == '__main__':

    #This handles Twitter authentication and the connection to the Twitter Streaming API
    l = StdOutListener()
    auth = OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    stream = Stream(auth, l)

    #This line filters the Twitter stream to capture tweets containing the keywords 'covid-19', 'quarantine', and 'lockdown'
    stream.filter(track=['covid-19', 'quarantine', 'lockdown'])
Example #55
                    logger.warning("Exception: error writing to file caused by: %s" % e)
                    pass
                except Exception as e:
                    raise

        try:
            # search twitter for keywords
            logger.info('Stock symbol: ' + str(args.symbol))
            logger.info('NLTK tokens required: ' + str(nltk_tokens_required))
            logger.info('NLTK tokens ignored: ' + str(nltk_tokens_ignored))
            logger.info('Twitter Feeds: ' + str(twitter_feeds))
            logger.info('Twitter User Ids: ' + str(useridlist))
            logger.info('Listening for Tweets (ctrl-c to exit)...')
            if args.keywords is None:
                logger.info('No keywords entered, following Twitter users')
                stream.filter(follow=useridlist, languages=['en'])
            else:
                # keywords to search on twitter
                # add keywords to list
                keywords = args.keywords.split(',')
                # add stock symbol to keywords
                #keywords.append(args.symbol)
                if args.addtokens:
                    # add tokens to keywords to list
                    for f in nltk_tokens_required:
                        keywords.append(f)
                logger.info('Twitter keywords: ' + str(keywords))
                logger.info('Searching Twitter for keywords')
                stream.filter(track=keywords, languages=['en'])
        except TweepError as te:
            logger.debug("Tweepy Exception: Failed to get tweets caused by: %s" % te)
            if confidence * 100 >= 80:
                output = open('uber.txt', 'a')
                output.write(sentiment_value)
                output.write('\n')
                output.close()

        except BaseException as e:
            print("error")
            #pass
        return True

    def on_error(self, status):
        print(status)
        return True


#twitter_stream = Stream(auth, MyListener())
#twitter_stream.filter(track = ['#SuperBowl'])

while True:
    try:
        # Connect/reconnect the stream
        twitter_stream = Stream(auth, MyListener())
        # DON'T run this approach async or you'll just create a ton of streams!
        twitter_stream.filter(track=['Uber'])
    except:
        pass

end = time.time()
print(end - start)
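
The bare while True / except: pass loop above reconnects immediately and will hammer the endpoint if the connection keeps failing. A sketch (not part of the original) of the same idea with exponential backoff:

import time

def run_stream_with_backoff(auth, keywords, max_wait=320):
    # Reconnect after errors, doubling the wait between attempts.
    wait = 5
    while True:
        try:
            twitter_stream = Stream(auth, MyListener())
            twitter_stream.filter(track=keywords)  # blocks while the stream is healthy
            wait = 5                               # reset after a clean disconnect
        except KeyboardInterrupt:
            break
        except Exception as e:
            print("Stream error: {} - retrying in {}s".format(e, wait))
            time.sleep(wait)
            wait = min(wait * 2, max_wait)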
            y = status.coordinates['coordinates'][1]

            # Save a few parameters into a csv file
            location = status.user.location
            text = status.text
            created = status.created_at
            retweets = status.retweet_count
            hashtag = status.entities.get('hashtags')

            print('Tweet arrived!\n', status.text)

            with open(self.csvname, 'a') as f:
                csvfile = csv.writer(f)
                csvfile.writerow(
                    [text, hashtag, location, x, y, created, retweets])

    def on_error(self, status_code):
        print(status_code)
        return False


if __name__ == '__main__':
    # This handles Twitter authentication and the connection to the Twitter Streaming API
    l = StdOutListener()
    auth = OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    stream = Stream(auth, l)

    # stream.filter(track=["trump", "clinton", "hillary clinton", "donald trump"])  ## Get feeds from all over the world
    stream.filter(locations=[-180, -90, 180, 90], async=True)
Example #58
        t = json.loads(data)  #t is just a regular python dictionary.

        tweet = {
            'timestamp': t['timestamp_ms'],
            'time_created': t['created_at'],
            'text': t['text'],
            'username': t['user']['screen_name'],
            'followers_count': t['user']['followers_count']
        }

        db.tweets.insert_one(
            tweet
        )  # insert retrieved tweet into mongodb database collection "tweets"

        #logging.critical(f'\n\n\nTWEET INCOMING: {tweet["text"]}\n\n\n')

    def on_error(self, status):

        if status == 420:
            print(status)
            return False


if __name__ == '__main__':

    auth = authenticate()
    listener = TwitterListener()
    stream = Stream(auth, listener)
    stream.filter(track=['sustainable'], languages=['en'])
            pass

        exit()

    def on_error(self, status_code):
        print(status_code)

    def start_stream(self):
        while True:
            try:
                stream = Stream(auth, l)
                stream.filter(locations=GEOBOX_VALENCIA)
            except:
                continue


if __name__ == '__main__':

    l = StdOutListener()
    auth = OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_secret)

    GEOBOX_VALENCIA = [-0.4315, 39.4196, -0.2857, 39.5045]

    while True:
        try:
            stream = Stream(auth, l)
            stream.filter(locations=GEOBOX_VALENCIA)
        except:
            continue
Example #60
            # converts unicode text to string
            unicode_string = d['text']
            encoded_string = unicode_string.encode('utf-8')
            d['text'] = encoded_string
            print d['text']
        else:
            pass
        return True

    # prints out error
    def on_error(self, status):
        print >> sys.stderr, 'Encountered error with status code:', status

    def on_timeout(self):
        print >> sys.stderr, 'Timeout'  # prints out timeout
        return True  # stops program from killing the stream


if __name__ == '__main__':

    # Set up Twitter authentication and connect to the Twitter Streaming API
    auth = OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_secret)
    twitterStream = Stream(auth, Listener())

    # This line filters Twitter Streams to capture data by the keywords: 'donald trump', 'jeb bush', 'scott walker', 'marco rubio', 'ben carson', 'ted cruz'
    twitterStream.filter(track=[
        'donald trump', 'jeb bush', 'scott walker', 'marco rubio',
        'ben carson', 'ted cruz'
    ])
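
All of the examples above target Tweepy 3.x, where a StreamListener instance is passed to Stream(auth, listener). If you are running Tweepy 4.x (an assumption about your environment, not something these examples cover), StreamListener is gone and Stream is subclassed directly; a minimal sketch:

import tweepy

class MyStream(tweepy.Stream):
    def on_status(self, status):
        print(status.text)

# Tweepy 4.x passes the credentials straight to the Stream subclass.
stream = MyStream(consumer_key, consumer_secret, access_token, access_token_secret)
stream.filter(track=['python'], languages=['en'])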