Code example #1
 def test_user_lookup(self):
     tw = Twitter(**self.SETTINGS)
     print("Twitter object: ", tw)
     tw.get_access_token()
     user = tw.user_lookup(screen_name='neokluber')
     print("Twitter username: ", user)
     assert user[0]['id_str']
Code example #2
 def test_post_only_text(self):
     t = Twitter(**twitter_access)
     tweet_text = "This is a test for plain text tweet at %r" % (_timenow())
     output = t.post_tweet(text=tweet_text)
     return_tweet_text = output.get('text')
     _add_to_test_tweets(output.get('id'), "text_only")
     self.assertEqual(tweet_text, return_tweet_text)
Code example #3
File: followtwits.py Project: ghoseb/followtwits
    def post(self):
        """Handle the POST method
        """
        path = os.path.join(os.path.dirname(__file__), 'index.html')
        try:
            username = escape(self.request.get('username'))
            password = escape(self.request.get('password'))
            logging.info("Handling request for %s" % username)
            t = Twitter(username, password)
            friends = set([f['screen_name'] for f in t.get_friends()])
            followers = set([f['screen_name'] for f in t.get_followers()])
            to_follow = followers.difference(friends)

            try:
                for user in to_follow:
                    try:
                        t.friendship_create(user, True)
                        logging.info("%s now follows %s" % (username, user))
                    except DownloadError:
                        logging.warning("Download error when %s tried to follow %s" % (username, user))
                        raise

                self.response.out.write(template.render(path, {"success": True}))

            except Exception, e:
                        logging.warning("Caught an exception %s when %s tried to follow %s" % (e, username, user))
                raise

        except DeadlineExceededError:
            self.response.out.write(template.render(path, {"error": True}))
Code example #4
File: server.py Project: rybesh/helpdesk
def render_note_list_as_html(notes):
    twitter = Twitter()
    twitter.search('from:rybesh')
    return render_template(
        'notes.html',
        notes=notes,
        tdata=twitter.search('from:rybesh'))
Code example #5
File: server.py Project: qcxu/helpdesk
def render_helprequest_list_as_html(helprequests):
    twitter = Twitter()
    twitter.search('from:rybesh')
    return render_template(
        'helprequests+microdata+rdfa.html',
        helprequests=helprequests,
        tdata=twitter.search('from:rybesh'))
Code example #6
class test_twitter(unittest.TestCase):

    def setUp(self):
        self.t = Twitter()

    def test_get_suburb(self):
        # need tests for oauth failure
        # t.get_tweet()
        # only works when you know what the most recent tweet on twitter is,
        # and it is a suburb with art in it (not Charnwood!)
        # self.assertEqual(self.t.get_suburb(), 'Charnwood', "Tweet not found")
        # self.assertEqual(self.t.get_suburb(), None, "same tweet again")
        pass

    def test_parse_tweets(self):
        test = '[{"created_at":"Sat Jul 12 01:22:32 +0000 2014",'
        test = test + '"id":487768939548536840,"id_str":"487768939548536840",'
        test = test + '"text":"@mhvgovhacktest Show me a Parkes sculpture"}]'
        jsontest = json.loads(test)
        self.assertEqual(self.t.parse_tweets(jsontest), 'PARKES', "valid suburb")
        self.assertEqual(self.t.parse_tweets(jsontest), None, "same tweet again")
        test = '[{"created_at":"Sat Jul 12 01:22:32 +0000 2014",'
        test = test + '"id":487768939548536840,"id_str":"487768939548536840",'
        test = test + '"text":"@mhvgovhacktest Show me a Charnwood sculpture"}]'
        jsontest = json.loads(test)
        self.assertEqual(self.t.parse_tweets(jsontest), None, "invalid suburb")

    def tearDown(self):
        del self.t
Code example #7
File: twitdao.py Project: tongsu/twitdao
    def __init__(self, token=None):
        self.token = token

        config = md.get_app_config()
        
        if token:
            self.twitter = Twitter(
                oauth_token=self.token.oauth_token,
                oauth_token_secret=self.token.oauth_token_secret,
                    
                consumer_key=config.consumer_key,
                consumer_secret=config.consumer_secret,
                request_token_url=config.request_token_url,
                access_token_url=config.access_token_url,
                authorize_url=config.authorize_url,
                authenticate_url=config.authenticate_url,
                api_url=config.api_url,
                search_api_url=config.search_api_url
            )
        else:
            self.twitter = Twitter(                    
                consumer_key=config.consumer_key,
                consumer_secret=config.consumer_secret,
                request_token_url=config.request_token_url,
                access_token_url=config.access_token_url,
                authorize_url=config.authorize_url,
                authenticate_url=config.authenticate_url,
                api_url=config.api_url,
                search_api_url=config.search_api_url
            )
Code example #8
File: tweet.py Project: nlehuen/led_display
    def _run(self):
        iterator = None

        # Display what is going to be tracked
        self._animation.queue_tweet(dict(
            user = dict(
                screen_name = 'this_display'
            ),
            text = "tracking \"%s\""%self._track
        ))

        try:
            if self._oauth:
                twitter = Twitter(domain="search.twitter.com", auth = self._oauth)
                i1 = twitter.search(q = self._track)
                i1 = i1['results']
            else:
                i1 = None

            if self._oauth:
                twitter_stream = TwitterStream(auth = self._oauth)
                i2 = twitter_stream.statuses.filter(track = self._track)
            else:
                i2 = None

            iterator = chain(i1, i2)

        except Exception, e:
            print "Could not connect to Twitter, generating random tweets"
            print "\t%s"%e

            iterator = self._random()
Code example #9
class TwitterTests(unittest.TestCase):

    def setUp(self):
        self.twitter = Twitter(CUR_DIR + "/test_crossfit.tweets", CUR_DIR + "/test_stop_words.txt")

    def test_data_preprocess(self):
        processor = TwitterDataPreprocessor()

        p_entry1 = processor.preprocess("\"There are no office hours for champions.\"—Paul Dietzel	@CrossFitGames")
        p_entry2 = processor.preprocess("Saturday 6-21-14 - http://t.co/ZtQWUsfal1 http://t.co/jPICqL3adi	@ReebokCrossFit1")
        p_entry3 = processor.preprocess("Crossfit Named - Kristan Clever  Valley CrossFit :	@Cleverhandz")

        text1 = p_entry1[0].strip()
        screen_name1 = p_entry1[1].strip()
        text2 = p_entry2[0].strip()
        screen_name2 = p_entry2[1].strip()
        text3 = p_entry3[0].strip()
        screen_name3 = p_entry3[1].strip()

        self.assertEqual(text1, "there are no office hours for champions paul dietzel")
        self.assertEqual(screen_name1, "crossfitgames")
        self.assertEqual(text2, "saturday 6 21 14 http t co ztqwusfal1 http t co jpicql3adi")
        self.assertEqual(screen_name2, "reebokcrossfit1")
        self.assertEqual(text3, "crossfit named kristan clever valley crossfit")
        self.assertEqual(screen_name3, "cleverhandz")

    def test_twitter_data_building(self):
        self.twitter.load_tweets_and_build_index()
Code example #10
def trainTwitter(username):
  global model
  yield ("Training on twitter for @{}".format(username))
  t = Twitter()

  model = NGram(t.getTweets(username), 2)
  yield "Trained"
Code example #11
File: funlink.py Project: msteinhoff/foption-bot
class Funlink():
    regexpattern = r':(.+) (?:PRIVMSG) ([\S]+) :.addurl(?: (.+))'
    
    def __init__(self):
        self.Twitter = Twitter("","")
        self.YouTube = YouTube()
        
    def handleInput(self,Matchlist):
        Source = Matchlist[0]
        Target = Matchlist[1]
        Text = Matchlist[2].split()
        
        try:
            URL = tinyurl.create_one(Text[0])
        except Exception:
            PrivMsg(Target,"4Error in 'TINYURL.Modul' >> '" + str(Exception) + "'")
            return
        
        Nick = re.match("(.+?)!", Source).group(1)
        
        if (len(Text) >= 2) or (re.search("(?:.+)youtube.com/(?:.+)v=(\w+)",Text[0]) and len(Text) == 1): # description was provided
            x = "[" + Nick + "] "
            
            # Determine extra info tags, e.g. [YouTube] [PNG] [TIF]
            if (re.search("(?:.+)youtube.com/(?:.+)v=(\w+)",Text[0])):
                x += "[YouTube] "
            elif (re.search("(\w+).rofl.to",Text[0])):
                r = re.search("(\w+).rofl.to",Text[0]).group(1)
                x += "[rofl.to] (" + str(r) +") "
            elif (re.search("collegehumor.com/(\w+)",Text[0])):
                r = re.search("collegehumor.com/(\w+)",Text[0]).group(1)
                x += "[CollegeHumor] (" + str(r) + ")"
            elif (re.search("newgrounds.com/",Text[0])):
                x += "[Newsground] "
            else:
                try:
                    Tag = re.search("\.(bmp|jpg|gif|img|jp2|jpeg|png|psd|tga|tif|txt)$",Text[0]).group(1)
                    x += "[" + Tag.upper() + "] "
                except:
                    pass
            
            if (len(Text) > 1):
                x += URL + " " + " ".join(Text[1:])
            else:
                r = re.search("(?:.+)youtube.com/(?:.+)v=([-_\w]+)",Text[0]).group(1)
                t = self.YouTube.getInfo(r)
                x += URL + " " + t
            
            # Twitter tweets must not be longer than 140 characters
            if (len(x) <= 140):
                self.Twitter.sendTweet(x)
                PrivMsg(Target,"hinzugefügt! - http://twitter.com/fptlnk","15Funlink:07 ")
            else:
                PrivMsg(Target,"Beschreibung zu lang. Max 140 Zeichen. Dein Add war " \
                + str(len(x)) + " Zeichen lang.","15Funlink:07 ")
        else: # no description given
                PrivMsg(Target,"Die Beschreibung fehlt!","15Funlink:07 ")
Code example #12
File: twitter_utils.py Project: tetious/podiobooks
def search(keywords):
    """Search the twitter timeline for keywords"""
    twitter_search = Twitter(domain="search.twitter.com")
    
    response = twitter_search.search(q=keywords)
    
    if response:
        return response['results']
    else:
        return None  # pragma: no cover
Code example #13
def main():
    conf = get_conf()
    twitter = Twitter(conf)

    parser = argparse.ArgumentParser()

    parser.add_argument('-q', '--query', type = str,
        help = "Gets all tweets and retweets for a specific search query (realtime)")
    parser.add_argument('-t', '--timeline', action = "store_true",
        help = "Gets all tweets for the authenticated user")
    parser.add_argument('-u', '--user', type = str,
        help = "Get a timeline with a username")
    parser.add_argument('-s', '--search', type = str,
        help = "Get results for a search query (not realtime)")

    args = parser.parse_args()

    if args.query:
        twitter.query(args.query)
    elif args.timeline:
        twitter.run_timeline()
    elif args.user:
        twitter.user_timeline()
    elif args.search:
        twitter.search(args.search)
    else:
        parser.print_help()
Code example #14
File: views.py Project: afternoon/followize
def auth(request):
    """Kick off the OAuth process"""
    tw = Twitter()
    try:
        token = tw.new_request_token()
    except DownloadError:
        return fail(request, _(u"Twitter is not responding!"
            u" Refresh to try again."))
    auth_url = tw.authorisation_url(token)
    request.session["unauthed_token"] = token.to_string()   
    return HttpResponseRedirect(auth_url)
Code example #15
    def test_get_friends(self):
        tw = Twitter(**self.SETTINGS)
        print("Twitter object: ", tw)
        tw.get_access_token()
        user = tw.user_lookup(screen_name='neokluber')
        uid = user[0]['id_str']

        friend_list = tw.get_friends(uid)
        print("Friends list: ", friend_list)
        assert isinstance(friend_list, list)
        assert friend_list
Code example #16
    def test_get_settings_geo_true(self):
        """
        This test should pass if the user has turned on the Tweet location account setting using the Twitter web/app.
        :return:
        :rtype:
        """
        t = Twitter(**twitter_access)
        settings = t.get_account_settings()
        geo = settings.get('geo_enabled')

        self.assertEqual(True, geo)
Code example #17
    def test_post_with_media(self):
        t = Twitter(**twitter_access)
        tweet_text = "This is a test for tweet with 3 media at %r. Pictures from Pixabay." % (_timenow())
        jpegs = ['m01.jpg', 'm02.jpg', 'm03.jpg']
        media_list = [os.path.join('testing', i) for i in jpegs]
        output = t.post_tweet(text=tweet_text, media=media_list)
        return_tweet_text = output.get('text')
        _add_to_test_tweets(output.get('id'), "with_media")

        media_urls = [i['media_url_https'] for i in output['extended_entities']['media']]

        self.assertEqual(len(media_list), len(media_urls))
Code example #18
File: manage.py Project: gthank/pytips
def _scrape_twitter_for_latest():
    """Scrape Twitter for interesting, new Python tips."""
    # This is the secret sauce: search Twitter for '#python tip' tweets that
    # are newer than our current newest.
    new_tweet = Tip.query.newest_tip()
    tweet_id = new_tweet.url.split('/')[-1] if new_tweet else None
    twitter_search = Twitter(domain='search.twitter.com')
    hits = twitter_search.search(q='#python tip', since_id=tweet_id)['results']
    # And now filter out all the retweets.
    not_old_rts = [t for t in hits if not _is_oldstyle_rt(t)]
    embedded_tweets = [_get_embedded(t['id_str']) for t in not_old_rts]
    return [t for t in embedded_tweets if not _is_newstyle_rt(t)]
Code example #19
    def test_post_with_latlong_geo_true(self):
        t = Twitter(**twitter_access)
        ll = (37.000880, -122.062309)
        tweet_text = "This is a test for plain text tweet with location at %r" % (_timenow())
        output = t.post_tweet(text=tweet_text, latlong=ll)
        return_tweet_text = output.get('text')
        _add_to_test_tweets(output.get('id'), "text_geo")

        self.assertEqual(tweet_text, return_tweet_text)
        if t.get_account_settings().get('geo_enabled'):
            return_ll = tuple(output['geo']['coordinates'])
            for expected, actual in zip(ll, return_ll):
                self.assertAlmostEqual(expected, actual)
Code example #20
    def test_get_timeline(self):
        t = Twitter(**twitter_access)
        user = twitter_username
        count = 6
        output = t.get_user_timeline(user=user, count=count)
        resp_user = set([i['user']['screen_name'] for i in output])

        # Set the ID for latest tweet before tests
        _write_top_tweet_file(output[0]['id'])

        self.assertEqual(1, len(resp_user))
        self.assertEqual(user, list(resp_user)[0])
        self.assertEqual(count, len(output))
Code example #21
    def test_get_limits(self):
        tw = Twitter(**self.SETTINGS)
        print("Twitter object: ", tw)
        tw.get_access_token()

        limits = tw.get_limits()
        print("Limits: ", limits)

        assert (limits.get('resources').get('friends').get('/friends/ids')
                .get('remaining'))

        assert (limits.get('resources').get('users').get('/users/lookup')
                .get('remaining'))
Code example #22
File: test_twitter.py Project: mbainrot/govhack2014
class test_twitter(unittest.TestCase):

    def setUp(self):
        self.t = Twitter()

    def test_get_suburb(self):
        # need tests for oauth failure
        # t.get_tweet()
        # only works when you know what the most recent tweet on twitter is,
        # and it is a suburb with art in it (not Charnwood!)
        # self.assertEqual(self.t.get_suburb()['suburb'], 'CHARNWOOD', "Tweet not found")
        # self.assertEqual(self.t.get_suburb(), None, "same tweet again")
        pass

    def test_parse_tweets(self):
        # parse tweets returns  dict {'timestamp', 'suburb', 'screen_name'}
        test = '[{"created_at":"Sat Jul 12 01:22:32 +0000 2014",'
        test = test + '"id":487768939548536840,"id_str":"487768939548536840",'
        test = test + '"text":"@mhvgovhacktest Show me a Parkes sculpture",'
        test = test + '"user":{"screen_name":"cmrn"}}]'
        jsontest = json.loads(test)
        suburb = self.t.parse_tweets(jsontest)
        ts = time.strptime("Sat Jul 12 01:22:32 +0000 2014", '%a %b %d %H:%M:%S +0000 %Y')
        tse = int(time.strftime('%s', ts))

        self.assertEqual(suburb['suburb'], 'PARKES', "valid suburb")
        self.assertEqual(suburb['screen_name'], 'cmrn', "screen name")
        self.assertEqual(suburb['timestamp'], tse, "timestamp")
        test = '[{"created_at":"Sat Jul 12 01:22:32 +0000 2014",'
        test = test + '"id":487768939548536840,"id_str":"487768939548536840",'
        test = test + '"text":"@mhvgovhacktest Show me a Parkes sculpture",'
        test = test + '"user":{"screen_name":"cmrn"}}]'
        jsontest = json.loads(test)
        suburb = self.t.parse_tweets(jsontest)
        self.assertEqual(suburb['suburb'], 'PARKES', "previous suburb")

        test = '[{"created_at":"Sat Jul 12 01:22:32 +0000 2014",'
        test = test + '"id":487768939548539999,"id_str":"487768939548536840",'
        test = test + '"text":"@mhvgovhacktest Show me a Civic sculpture",'
        test = test + '"user":{"screen_name":"cmrn"}}]'
        jsontest = json.loads(test)
        suburb = self.t.parse_tweets(jsontest)
        ts = time.strptime("Sat Jul 12 01:22:32 +0000 2014", '%a %b %d %H:%M:%S +0000 %Y')
        tse = int(time.strftime('%s', ts))
        self.assertEqual(suburb['suburb'], 'CITY', "valid suburb")
        self.assertEqual(suburb['screen_name'], 'cmrn', "screen name")
        self.assertEqual(suburb['timestamp'], tse, "timestamp")

    def tearDown(self):
        del self.t
Code example #23
File: TwitterMonitor.py Project: jungepiraten/ircbot
	def monitorloop(self):
		twitter = Twitter(domain="search.twitter.com")
		results = twitter.search(q=self.query,result_type="recent",rpp="1")
		watermark = results["max_id_str"]
		while True:
			results = twitter.search(q=self.query,result_type="recent",since_id=watermark,include_entities=1)
			for tweet in results["results"]:
				text = tweet["text"]
				for url in tweet["entities"]["urls"]:
					text = text.replace(url["url"], url["expanded_url"])
				self.callback(	tweet["from_user"],
						"https://twitter.com/#!/" + quote(tweet["from_user"]) + "/status/" + tweet["id_str"],
						text )
			watermark = results["max_id_str"]
			time.sleep(60)
Code example #24
File: tweetchi.py Project: EricSchles/tweetchi
class Api(object):

    def __init__(self):
        self.twitter_api = None
        self.twitter_search = Twitter(domain='search.twitter.com')

    @twitter_error
    def search(self, query, **params):
        if isinstance(query, unicode):
            query = query.encode('utf-8')
        return self.twitter_search.search(q=query, **params)

    @twitter_error
    def follow(self, ids, limit=10):
        " Follow on user. "
        for user_id in as_tuple(ids):
            self.twitter_api.friendships.create(user_id=user_id)
            limit -= 1
            if not limit:
                return True

    @twitter_error
    def mentions(self, since_id=None):
        " Get account mentions and save in db. "

        params = dict(count=200)
        if since_id:
            params['since_id'] = since_id

        mentions = sorted(map(
            Status.create_from_status,
            self.twitter_api.statuses.mentions(**params)))
        db.session.add_all(mentions)
        db.session.commit()

        return mentions

    @twitter_error
    def update(self, message, async=False, **kwargs):
        " Update twitter status and save it in db. "

        self.app.logger.info('Tweetchi: "%s"' % message)

        if not async:

            status = Status.create_from_status(
                self.twitter_api.statuses.update(status=message, **kwargs),
                myself=True)

            db.session.add(status)
            db.session.commit()

            return status

        from .celery import update as cupdate
        cupdate.delay(message,
                      self.config.get('OAUTH_TOKEN'),
                      self.config.get('OAUTH_SECRET'),
                      self.config.get('CONSUMER_KEY'),
                      self.config.get('CONSUMER_SECRET'), **kwargs)
Code example #25
File: test_cron.py Project: uniteddiversity/bridgy
  def test_update_twitter_pictures(self):
    sources = []
    for screen_name in ('a', 'b', 'c'):
      auth_entity = oauth_twitter.TwitterAuth(
        id='id', token_key='key', token_secret='secret',
        user_json=json.dumps({'name': 'Ryan',
                              'screen_name': screen_name,
                              'profile_image_url': 'http://pi.ct/ure',
                              }))
      auth_entity.put()
      sources.append(Twitter.new(None, auth_entity=auth_entity).put())

    user_objs = [{'screen_name': sources[0].id(),
                  'profile_image_url': 'http://pi.ct/ure',
                  }, {'screen_name': sources[1].id(),
                      'profile_image_url_https': 'http://new/pic_normal.jpg',
                      'profile_image_url': 'http://bad/http',
                  }]

    cron.TWITTER_USERS_PER_LOOKUP = 2
    self.expect_urlopen(cron.TWITTER_API_USER_LOOKUP % 'a,c',
                        json.dumps(user_objs))
    self.expect_urlopen(cron.TWITTER_API_USER_LOOKUP % 'b',
                        json.dumps(user_objs))
    self.mox.ReplayAll()

    resp = cron.application.get_response('/cron/update_twitter_pictures')
    self.assertEqual(200, resp.status_int)

    # self.assertEquals('http://pi.ct/ure', sources[0].get().picture)
    # self.assertEquals('http://new/pic.jpg', sources[1].get().picture)
    self.assertEquals('https://twitter.com/a/profile_image?size=original',
                      sources[0].get().picture)
    self.assertEquals('https://twitter.com/b/profile_image?size=original',
                      sources[1].get().picture)
Code example #26
File: test_twitter.py Project: LennonFlores/bridgy
  def test_new_massages_profile_image(self):
    """We should use profile_image_url_https and drop '_normal' if possible."""
    user = json.loads(self.auth_entity.user_json)
    user['profile_image_url_https'] = 'https://foo_normal.xyz'
    self.auth_entity.user_json = json.dumps(user)

    self.assertEqual('https://foo.xyz', Twitter.new(self.handler, auth_entity=self.auth_entity).picture)
Code example #27
File: utils.py Project: ruaronicola/TelepathyBot
 def query(hashtags, c):
     # Request to filter retweets
     query = "%s %s" % (hashtags, "-filter:retweets")
     print "New twitter query: "+query
     result = Twitter.create().search.tweets(
             q=query, result_type='recent', lang='en', count=c)
     return result
Code example #28
File: bot.py Project: carriercomm/omnibot
    def __init__(self, bot):
        config = ConfigParser.RawConfigParser()
        config.read(os.path.dirname(__file__) + os.sep + bot + os.sep + "omni.cfg")

        consumer_key = config.get(bot, 'consumer_key')
        consumer_secret = config.get(bot, 'consumer_secret')

        oauth = config.get(bot, 'oauth')
        oauth_filename = os.path.dirname(__file__) + os.sep + bot + os.sep + oauth
        oauth_token, oauth_token_secret = read_token_file(oauth_filename)

        self.handle = config.get(bot, 'handle')
        self.corpus = os.path.dirname(__file__) + os.sep + bot + os.sep + config.get(bot, 'corpus')
        self.method = config.get(bot, 'tweet_method')
        self.twitter = Twitter(domain='search.twitter.com')
        self.twitter.uriparts = ()
        self.poster = Twitter(
            auth=OAuth(
                oauth_token,
                oauth_token_secret,
                consumer_key,
                consumer_secret
            ),
            secure=True,
            api_version='1.1',
            domain='api.twitter.com')
Code example #29
    def setUp(self):
        """
        Setup search engine that will be subjected to the tests.
        """
        self.twitter = Twitter(CUR_DIR + "/test_crossfit.tweets", CUR_DIR + "/test_stop_words.txt")
        self.twitter.load_tweets_and_build_index()

        self.searcher = Searcher(self.twitter.tweets, self.twitter.stop_words)
Code example #30
File: twitter_test.py Project: notenoughneon/bridgy
  def test_get_activities(self):
    self.expect_urlopen('https://api.twitter.com/1.1/statuses/user_timeline.json?'
                        'include_entities=true&count=0',
      json.dumps([as_twitter_test.TWEET]))
    self.mox.ReplayAll()

    tw = Twitter.new(self.handler, auth_entity=self.auth_entity)
    self.assert_equals([as_twitter_test.ACTIVITY], tw.get_activities())
Code example #31
def search_tweets_from_API(save_name,
                           save_location,
                           query,
                           last_tweet_id,
                           tweets_per_query=100,
                           max_tweets=10000000,
                           max_id=-1):
    """
            Search for tweets

            Parameters
            ----------
            save_name: string
                    Name of the file to save the tweets to
            save_location: os.path
                    location to save the tweets to
            query: string
                    search query
            last_tweet_id: int
                    latest tweet ID that we collected previously (can also be None if collecting tweets for the first time)
            max_tweets : int (optional)
                    large number to make sure we collect all the available tweets
            tweets_per_query : int (optional)
                    the maximum number of tweets that can be returned by the API.
            max_id : int long (optional)
                    to make sure we exhaust the search
    """

    logging.info('Called function: {} '.format(sys._getframe().f_code.co_name))

    # create connection to twitter API
    twitter = Twitter(key=API_KEY, secret=API_SECRET)

    # connect to Twitter API
    twitter.connect_to_API()

    # create full file name
    file_name = os.path.join(save_location, save_name)

    # keep track of the number of tweets
    num_tweets = 0

    # create empty file
    with open(file_name, 'w') as f:

        while num_tweets < max_tweets:
            try:
                if (max_id <= 0):
                    if (not last_tweet_id):

                        new_tweets = twitter.search_tweets(
                            q=query,
                            count=tweets_per_query,
                            tweet_mode='extended')

                    else:

                        new_tweets = twitter.search_tweets(
                            q=query,
                            count=tweets_per_query,
                            since_id=last_tweet_id,
                            tweet_mode='extended')

                else:

                    if (not last_tweet_id):

                        new_tweets = twitter.search_tweets(
                            q=query,
                            count=tweets_per_query,
                            max_id=str(max_id - 1),
                            tweet_mode='extended')

                    else:

                        new_tweets = twitter.search_tweets(
                            q=query,
                            count=tweets_per_query,
                            max_id=str(max_id - 1),
                            since_id=last_tweet_id,
                            tweet_mode='extended')

                # update counter
                num_tweets += len(new_tweets)

                # check if no tweets could be obtained
                if num_tweets == 0:

                    logging.debug('No tweets found, no files created, exit...')

                    # close file
                    f.close()
                    # remove file (if we don't remove it here, we might use it to find the latest ID, but since its empty, it will give an error)
                    os.remove(file_name)

                    break

                # break if no more tweets can be retrieved
                if not new_tweets:
                    logging.debug('No more tweets found, exiting...')
                    break

                # append tweets to file
                for tweet in new_tweets:
                    f.write(json.dumps(tweet._json) + '\n')

                logging.debug(
                    'Downloaded number of tweets: {}'.format(num_tweets))

                # set the max ID
                max_id = new_tweets[-1].id

            except Exception as e:

                logging.error('[{}] : {}'.format(
                    sys._getframe().f_code.co_name, e))
                # close file
                f.close()
                # remove file (if we don't remove it here, we might use it to find the latest ID, but since its empty, it will give an error)
                os.remove(file_name)

                break
Code example #32
def get_tweet(tweet_id):
    """Looks up data for a single tweet."""

    twitter = Twitter(logs_to_cloud=False)
    return twitter.get_tweet(tweet_id)
Code example #33
File: main.py Project: jpowers/trump2cash
def twitter_callback(tweet):
    """Analyzes Trump tweets, makes stock trades, and sends tweet alerts."""

    # Initialize these here to create separate httplib2 instances per thread.
    analysis = Analysis(logs_to_cloud=LOGS_TO_CLOUD)
    trading = Trading(logs_to_cloud=LOGS_TO_CLOUD)

    companies = analysis.find_companies(tweet)
    logs.debug("Using companies: %s" % companies)
    if companies:
        trading.make_trades(companies)
        twitter.tweet(companies, tweet)


if __name__ == "__main__":
    logs = Logs(name="main", to_cloud=LOGS_TO_CLOUD)

    # Restart in a loop if there are any errors so we stay up.
    while True:
        logs.info("Starting new session.")

        twitter = Twitter(logs_to_cloud=LOGS_TO_CLOUD)
        try:
            twitter.start_streaming(twitter_callback)
        except BaseException as exception:
            logs.catch(exception)
        finally:
            twitter.stop_streaming()
            logs.info("Ending session.")
Code example #34
File: test_sanity.py Project: sathish-deevi/twitter
import os
from random import choice
import time
import pickle
import json

from twitter import Twitter, NoAuth, OAuth, read_token_file, TwitterHTTPError
from twitter.api import TwitterDictResponse, TwitterListResponse, POST_ACTIONS, method_for_uri
from twitter.cmdline import CONSUMER_KEY, CONSUMER_SECRET

noauth = NoAuth()
oauth = OAuth(*read_token_file('tests/oauth_creds') +
              (CONSUMER_KEY, CONSUMER_SECRET))

twitter11 = Twitter(domain='api.twitter.com', auth=oauth, api_version='1.1')

twitter_upl = Twitter(domain='upload.twitter.com',
                      auth=oauth,
                      api_version='1.1')

twitter11_na = Twitter(domain='api.twitter.com',
                       auth=noauth,
                       api_version='1.1')

AZaz = "abcdefghijklmnopqrstuvwxyz1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ"

b64_image_data = b"iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAAAAXNSR0IArs4c6QAAAAlwSFlzAAALEwAACxMBAJqcGAAAAAd0SU1FB94JFhMBAJv5kaUAAAAZdEVYdENvbW1lbnQAQ3JlYXRlZCB3aXRoIEdJTVBXgQ4XAAAA4UlEQVQoz7WSIZLGIAxG6c5OFZjianBcIOfgPkju1DsEBWfAUEcNGGpY8Xe7dDoVFRvHfO8NJGRorZE39UVe1nd/WNfVObcsi3OOEAIASikAmOf5D2q/FWPUWgshKKWfiFIqhNBaxxhPjPQ05/z+Bs557xw9hBC89ymlu5BS8t6HEC5NW2sR8alRRLTWXoRSSinlSejT12M9BAAAgCeoTw9BSimlfBIu6WdYtVZEVErdaaUUItZaL/9wOsaY83YAMMb0dGtt6Jdv3/ec87ZtOWdCCGNsmibG2DiOJzP8+7b+AAOmsiPxyHWCAAAAAElFTkSuQmCC"


def get_random_str():
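
get_random_str() is cut off right after its def line. A minimal sketch of such a helper, reusing the choice import and the AZaz alphabet defined above (the length of 10 is an assumption, not the original code), could be:

def get_random_str():
    # Build a short random string from the AZaz alphabet; 10 characters is an arbitrary choice.
    return ''.join(choice(AZaz) for _ in range(10))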
Code example #35
import time
from dateutil.parser import parse

##### END IMPORT #####

##### BEGIN AUTHENTICATION #####

access_token = "298221198-0ncIxf4UXOaOj7GTKe24sffxCcRwfcwveIwJMGWH"
access_secret = "klSYxSRqPQzIhP721DBuvtH1nTY93K4vZ4B3cHHwYIQsF"
consumer_key = "JJ6I0QWQEtbY2qcVJPHkd6p7n"
consumer_secret = "jzq41XHKGwMArIZ3ATAhjj4sky4yJXVRACKNxaa4O4IKJzZ1UA"

oauth = OAuth(access_token, access_secret, consumer_key, consumer_secret)

# connect to the REST API
twitter_rest = Twitter(auth=oauth)

##### END AUTHENTICATION #####
'''
# get a number of tweets of @UFPA_Oficial
ufpa_tweets = twitter_rest.statuses.user_timeline(screen_name = "lidiagianne", count = 10)
# save them to a .txt file
with open('ufpa-tweets.txt', 'w') as ufpa_t:
	ufpa_t.write(json.dumps(ufpa_tweets, indent = 4, sort_keys = True))
	ufpa_t.write('\n')

# get a number of tweets of @UFPA_Oficial (JSON response manipulation)
ufpa_tweets = twitter_rest.statuses.user_timeline(screen_name = "UFPA_Oficial", count = 100)
# save them to a .txt file
with open('ufpa-tweets-JSON-response-manipulated.txt', 'w') as s_t:
	for tweet in ufpa_tweets:
Code example #36
File: data.py Project: isabella232/bestsongs15
def update_featured_social():
    """
    Update featured tweets
    """
    COPY = copytext.Copy(app_config.COPY_PATH)
    secrets = app_config.get_secrets()

    # Twitter
    print 'Fetching tweets...'

    twitter_api = Twitter(
        auth=OAuth(
            secrets['TWITTER_API_OAUTH_TOKEN'],
            secrets['TWITTER_API_OAUTH_SECRET'],
            secrets['TWITTER_API_CONSUMER_KEY'],
            secrets['TWITTER_API_CONSUMER_SECRET']
        )
    )

    tweets = []

    for i in range(1, 4):
        tweet_url = COPY['share']['featured_tweet%i' % i]

        if isinstance(tweet_url, copytext.Error) or unicode(tweet_url).strip() == '':
            continue

        tweet_id = unicode(tweet_url).split('/')[-1]

        tweet = twitter_api.statuses.show(id=tweet_id)

        creation_date = datetime.strptime(tweet['created_at'],'%a %b %d %H:%M:%S +0000 %Y')
        creation_date = '%s %i' % (creation_date.strftime('%b'), creation_date.day)

        tweet_url = 'http://twitter.com/%s/status/%s' % (tweet['user']['screen_name'], tweet['id'])

        photo = None
        html = tweet['text']
        subs = {}

        for media in tweet['entities'].get('media', []):
            original = tweet['text'][media['indices'][0]:media['indices'][1]]
            replacement = '<a href="%s" target="_blank" onclick="_gaq.push([\'_trackEvent\', \'%s\', \'featured-tweet-action\', \'link\', 0, \'%s\']);">%s</a>' % (media['url'], app_config.PROJECT_SLUG, tweet_url, media['display_url'])

            subs[original] = replacement

            if media['type'] == 'photo' and not photo:
                photo = {
                    'url': media['media_url']
                }

        for url in tweet['entities'].get('urls', []):
            original = tweet['text'][url['indices'][0]:url['indices'][1]]
            replacement = '<a href="%s" target="_blank" onclick="_gaq.push([\'_trackEvent\', \'%s\', \'featured-tweet-action\', \'link\', 0, \'%s\']);">%s</a>' % (url['url'], app_config.PROJECT_SLUG, tweet_url, url['display_url'])

            subs[original] = replacement

        for hashtag in tweet['entities'].get('hashtags', []):
            original = tweet['text'][hashtag['indices'][0]:hashtag['indices'][1]]
            replacement = '<a href="https://twitter.com/hashtag/%s" target="_blank" onclick="_gaq.push([\'_trackEvent\', \'%s\', \'featured-tweet-action\', \'hashtag\', 0, \'%s\']);">%s</a>' % (hashtag['text'], app_config.PROJECT_SLUG, tweet_url, '#%s' % hashtag['text'])

            subs[original] = replacement

        for original, replacement in subs.items():
            html =  html.replace(original, replacement)

        # https://dev.twitter.com/docs/api/1.1/get/statuses/show/%3Aid
        tweets.append({
            'id': tweet['id'],
            'url': tweet_url,
            'html': html,
            'favorite_count': tweet['favorite_count'],
            'retweet_count': tweet['retweet_count'],
            'user': {
                'id': tweet['user']['id'],
                'name': tweet['user']['name'],
                'screen_name': tweet['user']['screen_name'],
                'profile_image_url': tweet['user']['profile_image_url'],
                'url': tweet['user']['url'],
            },
            'creation_date': creation_date,
            'photo': photo
        })

    # Facebook
    print 'Fetching Facebook posts...'

    fb_api = GraphAPI(secrets['FACEBOOK_API_APP_TOKEN'])

    facebook_posts = []

    for i in range(1, 4):
        fb_url = COPY['share']['featured_facebook%i' % i]

        if isinstance(fb_url, copytext.Error) or unicode(fb_url).strip() == '':
            continue

        fb_id = unicode(fb_url).split('/')[-1]

        post = fb_api.get_object(fb_id)
        user  = fb_api.get_object(post['from']['id'])
        user_picture = fb_api.get_object('%s/picture' % post['from']['id'])
        likes = fb_api.get_object('%s/likes' % fb_id, summary='true')
        comments = fb_api.get_object('%s/comments' % fb_id, summary='true')
        #shares = fb_api.get_object('%s/sharedposts' % fb_id)

        creation_date = datetime.strptime(post['created_time'],'%Y-%m-%dT%H:%M:%S+0000')
        creation_date = '%s %i' % (creation_date.strftime('%b'), creation_date.day)

        # https://developers.facebook.com/docs/graph-api/reference/v2.0/post
        facebook_posts.append({
            'id': post['id'],
            'message': post['message'],
            'link': {
                'url': post['link'],
                'name': post['name'],
                'caption': (post['caption'] if 'caption' in post else None),
                'description': post['description'],
                'picture': post['picture']
            },
            'from': {
                'name': user['name'],
                'link': user['link'],
                'picture': user_picture['url']
            },
            'likes': likes['summary']['total_count'],
            'comments': comments['summary']['total_count'],
            #'shares': shares['summary']['total_count'],
            'creation_date': creation_date
        })

    # Render to JSON
    output = {
        'tweets': tweets,
        'facebook_posts': facebook_posts
    }

    with open('data/featured.json', 'w') as f:
        json.dump(output, f)
Code example #37
if sys.version_info[0] < 3:
    raise Exception("Python 3 is required to run this script")

try:
    with open('config_json.txt') as config_file:
        config = json.load(config_file)
except Exception as e:
    print("Could not load configuration from config_json.txt")
    sys.exit(1)

oauth = OAuth(config['ACCESS_TOKEN'], config['ACCESS_SECRET'],
              config['CONSUMER_KEY'], config['CONSUMER_SECRET'])

# Initiate the connection to Twitter
twitter_search = Twitter(auth=oauth)

query_terms = ["Testing"]
if len(sys.argv) > 1:
    query_terms = sys.argv[1:]

# This is the data we want to pull from each tweet
data_headers = [
    "id", "user", "created_at", "text", "truncated", "lang", "geo",
    "coordinates", "place", "retweet_count", "favorite_count"
]

# Save each new file with a date and time stamp
datestring = datetime.strftime(datetime.now(), '%Y-%m-%d-%H%M')
filename = '{}_tweets.csv'.format(datestring)
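
The snippet ends before any tweets are fetched or written. A hedged sketch of that step with the same client (joining query_terms with OR, the count of 100, and the per-field extraction are assumptions, not the original code) might be:

import csv

results = twitter_search.search.tweets(q=' OR '.join(query_terms), count=100)
with open(filename, 'w', newline='') as csvfile:
    writer = csv.writer(csvfile)
    writer.writerow(data_headers)
    for tweet in results['statuses']:
        # Most headers map to top-level tweet keys; 'user' is nested, so take the screen name instead.
        row = [tweet['user']['screen_name'] if h == 'user' else tweet.get(h) for h in data_headers]
        writer.writerow(row)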
Code example #38
    # The strategy needs to be active.
    if strategy["action"] == "hold":
        return False

    # We need to know the stock price.
    if not strategy["price_at"] or not strategy["price_eod"]:
        return False

    return True


if __name__ == "__main__":
    analysis = Analysis(logs_to_cloud=False)
    trading = Trading(logs_to_cloud=False)
    twitter = Twitter(logs_to_cloud=False)

    # Look up the metadata for the tweets.
    tweets = twitter.get_tweets(TWEET_IDS)

    events = []
    for tweet in tweets:
        event = {}

        timestamp_str = tweet["created_at"]
        timestamp = trading.utc_to_market_time(
            datetime.strptime(timestamp_str, "%a %b %d %H:%M:%S +0000 %Y"))
        text = tweet["text"]
        event["timestamp"] = timestamp
        event["text"] = text
        event["link"] = twitter.get_tweet_link(tweet)
Code example #39
from twitter import Twitter, OAuth, TwitterHTTPError
import os

# put your tokens, keys, secrets, and Twitter handle in the following variables
OAUTH_TOKEN = "1274110761506390016-4OO8BNT0q2rY8XKcViEkKB9I4NKYFs"
OAUTH_SECRET = "Esa4zQZHEnXwzfvF5OVS1uTFtqkqJXe6n2wkEvP9jcYyw"
CONSUMER_KEY = "d8MxSNTmgWAMRmNhlXG8Oc9KY"
CONSUMER_SECRET = "3OaZ7aW3MmnZSmYZtT1V0p7vCZpcoqSkdm9jAFbJGhppSP2hB9"
TWITTER_HANDLE = "19962221"

# put the full path and file name of the file you want to store your "already followed"
# list in
ALREADY_FOLLOWED_FILE = "already-followed.csv"

t = Twitter(
    auth=OAuth(OAUTH_TOKEN, OAUTH_SECRET, CONSUMER_KEY, CONSUMER_SECRET))


def search_tweets(q, count=100, result_type="recent"):
    """
        Returns a list of tweets matching a certain phrase (hashtag, word, etc.)
    """

    return t.search.tweets(q=q, result_type=result_type, count=count)


def auto_fav(q, count=100, result_type="recent"):
    """
        Favorites tweets that match a certain phrase (hashtag, word, etc.)
    """
Code example #40
File: twitter.py Project: ruphy/hepdata
def tweet(title, collaborations, url, version=1):
    """
    :param title:
    :param collaborations:
    :param url:
    :param version:
    :return:
    """
    if USE_TWITTER:

        OAUTH_TOKEN = current_app.config['OAUTH_TOKEN']
        OAUTH_SECRET = current_app.config['OAUTH_SECRET']
        CONSUMER_KEY = current_app.config['CONSUMER_KEY']
        CONSUMER_SECRET = current_app.config['CONSUMER_SECRET']

        if not OAUTH_TOKEN or not OAUTH_SECRET or not CONSUMER_KEY or not CONSUMER_SECRET:
            # log this error
            print("Twitter credentials must be supplied!")
        else:
            twitter = Twitter(auth=OAuth(OAUTH_TOKEN, OAUTH_SECRET,
                                         CONSUMER_KEY, CONSUMER_SECRET))

            # Try to tweet with paper title truncated to 10 words.
            # If tweet exceeds 140 characters, keep trying with one less word each time.
            words = 10
            tweeted = False
            while words and not tweeted:

                try:

                    if version == 1:
                        status = "Added{0} data on \"{1}\" to {2}".format(
                            get_collaboration_string(collaborations),
                            truncate_string(
                                encode_string(cleanup_latex(title)), words),
                            url)
                    else:
                        status = "Revised{0} data on \"{1}\" at {2}?version={3}".format(
                            get_collaboration_string(collaborations),
                            truncate_string(
                                encode_string(cleanup_latex(title)), words),
                            url, version)

                    twitter.statuses.update(status=status)
                    tweeted = True
                    print("Tweeted: {}".format(status))

                except Exception as e:
                    # It would be nice to get a stack trace here
                    if e.e.code == 403:
                        error = json.loads(e.response_data.decode('utf8'))
                        if error["errors"][0][
                                "code"] == 186:  # Status is over 140 characters.
                            words = words - 1  # Try again with one less word.
                        else:
                            break
                    else:
                        break

            if not tweeted:
                print(e.__str__())
                print("(P) Failed to post tweet for record {0}".format(url))
Code example #41
#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
   python_version  2.7.11
   author: zzl
"""
from twitter import Twitter, OAuth

#Make sure to add the access tokens and consumer keys for your application
t = Twitter(auth=OAuth("Access Token", "Access Token Secret", "Consumer Key",
                       "Consumer Secret"))
pythonStatuses = t.statuses.user_timeline(screen_name="montypython", count=5)
print(pythonStatuses)
Code example #42
def _search_twitter_thread(scan_id, asset_kw):

    issue_id = 0
    findings = []
    twitter = Twitter(auth=OAuth(engine.options["twitter_oauth_token"],
                                 engine.options["twitter_oauth_secret"],
                                 engine.options["twitter_consumer_key"],
                                 engine.options["twitter_consumer_secret"]),
                      retry=True)

    # Set the Max count
    max_count = APP_SEARCH_TWITTER_MAX_COUNT_DEFAULT
    extra_kw = ""
    since = ""
    if "search_twitter_options" in engine.scans[scan_id]["options"].keys(
    ) and engine.scans[scan_id]["options"][
            "search_twitter_options"] is not None:
        if "max_count" in engine.scans[scan_id]["options"][
                "search_twitter_options"].keys() and engine.scans[scan_id][
                    "options"]["search_twitter_options"][
                        "max_count"] is not None and isinstance(
                            engine.scans[scan_id]["options"]
                            ["search_twitter_options"]["max_count"], int):
            max_count = engine.scans[scan_id]["options"][
                "search_twitter_options"]["max_count"]
        if "extra_kw" in engine.scans[scan_id]["options"][
                "search_twitter_options"].keys() and engine.scans[scan_id][
                    "options"]["search_twitter_options"][
                        "extra_kw"] is not None and isinstance(
                            engine.scans[scan_id]["options"]
                            ["search_twitter_options"]["extra_kw"], list):
            extra_kw = " OR ".join(engine.scans[scan_id]["options"]
                                   ["search_twitter_options"]["extra_kw"])
        if "since" in engine.scans[scan_id]["options"][
                "search_twitter_options"].keys() and engine.scans[scan_id][
                    "options"]["search_twitter_options"][
                        "since"] is not None and isinstance(
                            engine.scans[scan_id]["options"]
                            ["search_twitter_options"]["since"], basestring):
            since = "since:{}".format(engine.scans[scan_id]["options"]
                                      ["search_twitter_options"]["since"])

    # WARNING a query should not exceed 500 chars, including filters and operators
    # print "query_string :", "\""+asset_kw+"\" "+extra_kw+" "+since+" -filter:retweets", "len:", len("\""+asset_kw+"\" "+extra_kw+" "+since+" -filter:retweets")
    results = twitter.search.tweets(q="\"" + asset_kw + "\" " + extra_kw +
                                    " -filter:retweets",
                                    count=max_count)
    # print results

    if len(results["statuses"]) == 0:  # no results
        metalink = "https://twitter.com/search" + results["search_metadata"][
            "refresh_url"]
        new_finding = PatrowlEngineFinding(
            issue_id=issue_id, type="twitter_leak",
            title="No matching tweets.",
            description="No matching tweet with following parameters:\n" + \
                "Keyword (strict): {}\n".format(asset_kw) + \
                "Extra key words: {}\n".format(extra_kw) + \
                "URL: {}\n".format(metalink),
            solution="N/A",
            severity="info", confidence="firm",
            raw=results,
            target_addrs=[asset_kw],
            meta_links=[metalink])
        findings.append(new_finding)

    else:
        for tweet in results["statuses"]:
            # print "id:", tweet["id"], "text:", tweet["text"]
            # print "user_id:", tweet["user"]["id"], "user_name:", tweet["user"]["name"], "user_nickname:", tweet["user"]["screen_name"]
            # print "tweet_url:", "https://twitter.com/i/web/status/"+tweet["id_str"]

            issue_id += 1
            tw_hash = hashlib.sha1(tweet["text"]).hexdigest()[:6]

            metalink = "https://twitter.com/search" + results[
                "search_metadata"]["refresh_url"]
            new_finding = PatrowlEngineFinding(
                issue_id=issue_id, type="twitter_leak",
                title="Tweet matching search query (HASH: {}).".format(tw_hash),
                description="A tweet matching monitoring keywords has been found:\n" + \
                    "Query options:\nKeyword (strict): {}\n".format(asset_kw) + \
                    "Extra key words: {}\n".format(extra_kw) + \
                    "URL: {}\n".format(metalink),
                solution="Evaluate criticity. See internal procedures for incident reaction.",
                severity="high", confidence="firm",
                raw=tweet,
                target_addrs=[asset_kw],
                meta_links=[metalink])
            findings.append(new_finding)

    # Write results under mutex
    scan_lock = threading.RLock()
    with scan_lock:
        engine.scans[scan_id][
            "findings"] = engine.scans[scan_id]["findings"] + findings
Code example #43
from twitter import Twitter, OAuth

aToken = r"2354306079-sDY74vrY2tIsO5B4lIgbGKni1y3UFYPuE7kGW3E"
aTokenSec = r"xJQGZipFJRU7yi0wAGsfo1TvrUaTp1TQklwveOqUDd6EY"
cKey = r"mjuoCpYhKAyInosFQbVdHMjrG"
cKeySec = r"Yq7PfP8Vy48JehXXjLNcNqekjVVDnpGTQStx7jTB89aHH7tGWN"
t = Twitter(auth=OAuth(aToken,aTokenSec,cKey,cKeySec))
pythonTweets = t.search.tweets(q = "#python")
print(pythonTweets)
Code example #44
from twitter import Twitter
import sys
import json
import csv
import os

if __name__ == '__main__':
    ifn = sys.argv[1]

    filepath = os.path.normpath(ifn)
    ifbasename = os.path.basename(filepath)
    dirname = os.path.dirname(filepath) or '.'

    with open('credentials.csv') as infile:
        reader = csv.reader(infile)
        t = Twitter(list(reader)[1:])

    with open(ifn) as infile:
        for line in infile:
            tweet_id = line.strip()
            with open('%s/%s_retweets.json' % (dirname, tweet_id),
                      'w',
                      newline='') as outfile:
                tweets = t.get_retweets(tweet_id)
                for tweet in tweets:
                    outfile.write(json.dumps(tweet) + '\n')
Code example #45
import pandas as pd
from twitter import Twitter, OAuth  # assumed import; the top of this snippet is cut off
# import json_normalize
from pandas.io.json import json_normalize

#twitter keys
ck = "gwEdUQjvo9JDSLMpii205Q06O"
cs = "OlA4PIlLfpZ5HcEAGPOIJx4DwduhmACShoGQ66mJYt4ocxShwX"
at = "823996990329589760-1wW34frljiy7HrWp777qGFzOhEZksMW"
ats = "nzbKJCBUqlCsP6MNe5CFrOzZHo4wcvbubt1nJTqRkvFiB"
oauth = OAuth(at,ats,ck,cs)

#OAuth


#Twitter search
api = Twitter(auth=oauth)

t_loc = api.trends.available()
loc_df = json_normalize(t_loc)
loc_df[(loc_df['countryCode']=='US') & (loc_df['name'].str.contains('New'))]
ny_trend = api.trends.place(_id = 2459115)
nydf = json_normalize(ny_trend,'trends')
nydf.head()
nydf.sort_values('tweet_volume',ascending=False).head(5)



q='StudentsStandUp'
df = pd.DataFrame()
mid = 0
for i in range(10):
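
The body of the paging loop is cut off here. A hedged sketch of a typical max_id paging loop with this client (the per-page count of 100 and the accumulation into df are assumptions, not the original code) might look like:

for i in range(10):
    # Page backwards through search results; mid == 0 means no upper bound yet.
    if mid:
        res = api.search.tweets(q=q, count=100, max_id=mid)
    else:
        res = api.search.tweets(q=q, count=100)
    statuses = res['statuses']
    if not statuses:
        break
    # Flatten the returned statuses into rows and accumulate them.
    df = pd.concat([df, json_normalize(statuses)], ignore_index=True)
    # Move the cursor below the oldest tweet seen so far.
    mid = min(s['id'] for s in statuses) - 1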
Code example #46
from twitter import OAuth, Twitter
from credentials import ACCESS_TOKEN, ACCESS_SECRET, CONSUMER_KEY, CONSUMER_SECRET

oauth = OAuth(ACCESS_TOKEN, ACCESS_SECRET, CONSUMER_KEY, CONSUMER_SECRET)
twit = Twitter(auth=oauth, retry=1)

followers = twit.followers.ids(user_id=1204948499005001728)
following = twit.friends.ids(user_id=1204948499005001728)
# maximum 5000 users, so if I ever have more than that many followers,
# I'll need to add consecutive searches.

to_follow = []
to_delete = []

for usr_id in followers['ids']:
    if usr_id not in following['ids']:
        to_follow.append(usr_id)

for usr_id in following['ids']:
    if usr_id not in followers['ids']:
        to_delete.append(usr_id)

print(f'to_follow ({len(to_follow)}):\n{to_follow}\n')
print(f'to_delete ({len(to_delete)}):\n{to_delete}\n')

print('following and deleting...')
for usr_id in to_follow:
    try:
        twit.friendships.create(user_id=usr_id)
    except Exception as e:
        print(e)
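
As the comment above notes, followers.ids and friends.ids return at most 5,000 IDs per call. A hedged sketch of cursor-based paging with the same client (the helper name is illustrative, not part of the original script) could be:

def get_all_ids(endpoint, user_id):
    # Walk a cursored ids endpoint (followers/ids or friends/ids) until the cursor comes back as 0.
    ids, cursor = [], -1
    while cursor != 0:
        page = endpoint(user_id=user_id, cursor=cursor)
        ids.extend(page['ids'])
        cursor = page['next_cursor']
    return ids

# Usage with the twit client above:
# followers_ids = get_all_ids(twit.followers.ids, user_id=1204948499005001728)
# following_ids = get_all_ids(twit.friends.ids, user_id=1204948499005001728)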
Code example #47
    r'(?s)<a class="story-link".*?href="(.*?)".*?>.*?<h2.*?>(.*?)</h2>.*?'
    r'<img.*?src="(.*?)".*?>.*?</a>')
latest_expr = re.compile(
    r'(?s)<ol class="story-menu theme-stream initial-set">(.*)</ol>')

try:
    oauth = OAuth(os.environ['ACCESS_TOKEN'], os.environ['ACCESS_SECRET'],
                  os.environ['CONSUMER_KEY'], os.environ['CONSUMER_SECRET'])
    SHORTE_ST_TOKEN = os.environ['SHORTE_ST_TOKEN']
except KeyError:  # For local tests.
    with open('credentials', 'r') as secret:
        exec(secret.read())
        oauth = OAuth(ACCESS_TOKEN, ACCESS_SECRET, CONSUMER_KEY,
                      CONSUMER_SECRET)

t = Twitter(auth=oauth)
# For uploading photos.
t_upload = Twitter(auth=oauth, domain="upload.twitter.com")

ts = TwitterStream(auth=oauth)
tu = TwitterStream(auth=oauth, domain="userstream.twitter.com")

# Following are some useful wrappers for Twitter-related functionalities.


def pf(sn):
    """
    Attempts to print the followers of a user, provided their
    screen name.
    """
Code example #48
 def user_sentiment(self):
     for product in self.products:
         tweets = numpy.asarray(Twitter.get_latest_tweets(product, 100))
     return 1 if len(tweets) == 0 else (tweets == 'POSITIVE').mean()
Code example #49
File: app.py Project: jeddiesaudi/SADFESS
from twitter import Twitter
import time

tw = Twitter()


def start():
    print("Starting program...")
    dms = list()
    while True:
        if len(dms) != 0:
            for i in range(len(dms)):
                message = dms[i]['message']
                # I take sender_id just in case you want to know who's sent the message
                sender_id = dms[i]['sender_id']
                id = dms[i]['id']
                text = "[BOT] Tweet kamu udah terkirim ya. Kamu udahan ya sedihnya, jangan sedih terus :("

                if len(message) != 0 and len(message) < 280:
                    # "sad" is the trigger keyword
                    # message.lower() makes the match case-insensitive (SAD, Sad, sAd all work)
                    if "sad" in message.lower():
                        if len(message) != 0:
                            if dms[i]['media'] is None:
                                print("DM will be posted")
                                tw.post_tweet(message)
                                tw.send_dm(sender_id, text)
                                tw.delete_dm(id)
                            else:
                                print("DM will be posted with media")
Code example #50
File: snippet.py Project: someburner/GistsHub
"""

from twitter import Twitter, OAuth
from pyquery import PyQuery
from urllib import urlopen
from urlparse import urljoin
from jinja2 import Template
from logging import getLogger
from sys import stdout

TOKEN_KEY = ''
TOKEN_SECRET = ''
CONSUMER_KEY = ''
CONSUMER_SECRET = ''

t = Twitter(auth=OAuth(TOKEN_KEY, TOKEN_SECRET, CONSUMER_KEY, CONSUMER_SECRET))
logger = getLogger()

TEMPLATE = """<?xml version="1.0" encoding="ISO-8859-1"?>
<opml version="1.0">
	<head>
		<title>OPML</title>
		<ownerName>Steadman</ownerName>
		<ownerEmail>[email protected]</ownerEmail>
	</head>
	<body>
		{% for f in feeds %}<outline text="{{ f.name }}" title="{{ f.name }}">
			<outline text="{{ f.name }}" title="{{ f.name }}" type="rss" xmlUrl="{{ f.feed_url }}" htmlUrl="{{ f.html_url }}" />
		</outline>
		{% endfor %}
	</body>
Code example #51
File: wumpus.py Project: cherdt/wumpus-twitter
from state import State
from board import Board
from config import get_client
from birdy.twitter import UserClient
from twitter import Twitter
import re
import random
import sys

state = State("state")
board = Board()
# create twitter client
client = get_client()
# create twitter wrapper
twitter = Twitter(client, "twitter_state")
game_over = False


def is_debug_mode():
    return False


def end_game(msg):
    global game_over
    game_over = True
    state.delete()
    tweet(msg + "\n\nDM me any time to start a new game!")


def tweet(msg):
    if is_debug_mode():
Code example #52
def twitter(request):
    print(request.module)
    twitter = Twitter()
    yield twitter
    twitter.delete()
Code example #53
File: collect.py Project: rmdes/gazouilloire
# -*- coding: utf-8 -*-

import csv, json
from time import time, sleep
from pymongo import MongoClient
from twitter import Twitter, OAuth2, TwitterHTTPError
from config import CSV_SOURCE, CSV_ENCODING, CSV_TWITTER_FIELD, MONGO_DATABASE, TWITTER
from gazouilloire.tweets import prepare_tweet

with open(CSV_SOURCE) as f:
    data = list(csv.DictReader(f, delimiter=';'))

oauth2 = OAuth2(bearer_token=json.loads(
    Twitter(api_version=None,
            format="",
            secure=True,
            auth=OAuth2(TWITTER['KEY'], TWITTER['SECRET'])).oauth2.token(
                grant_type="client_credentials"))['access_token'])
api = Twitter(auth=oauth2)

db = MongoClient("localhost", 27017)[MONGO_DATABASE]


def wrapper(route, args={}, tryouts=50):
    try:
        return route(**args)
    except TwitterHTTPError as e:
        routestr = '/'.join(route.uriparts[1:])
        if e.e.code == 429:
            reset = int(e.e.headers["x-rate-limit-reset"])
            sleeptime = int(reset - time() + 2)
Code example #54
File: trnds.py Project: imhardikj/TwitterAPI
    import simplejson as json

import csv

# Import the necessary methods from "twitter" library
from twitter import Twitter, OAuth, TwitterHTTPError, TwitterStream

# Variables that contains the user credentials to access Twitter API
ACCESS_TOKEN = '1660471136-gTl1XwgI1yVTBCQwhQBAknEhqmeEHKjhXaJyutR'
ACCESS_SECRET = 'iaKFeL2OtS15ddTjFurWeVpUrligzJHJ9PgYqfBkIDAw2'
CONSUMER_KEY = 'tDHKXx4j2wXlm1j4y97FpTFPn'
CONSUMER_SECRET = 'aL92keHuYjydc2K8oXAPfT1cwO4Elr3upLIecYNTfdVEjx59hH'

oauth = OAuth(ACCESS_TOKEN, ACCESS_SECRET, CONSUMER_KEY, CONSUMER_SECRET)

# Initiate the connection to Twitter REST API
twitter = Twitter(auth=oauth)

india_trends = twitter.trends.place(_id=23424848)

#print(json.dumps(world_trends, indent=4))

i = 0
with open('trnds.txt', 'a') as obj:
    while i < 10:
        obj.write(india_trends[0]['trends'][i]['name'] + '\n')
        i = i + 1

with open('trnds.txt') as obj:
    for line in obj:
        print(line)
Code example #55
# https://www.google.co.in/search?site=imghp&tbm=isch&source=hp&biw=1280&bih=647&q=trump+funny+meme
# Also post images in replies.

try:
    OAUTH = OAuth(os.environ['TW_ACCESS_TOKEN'],
                  os.environ['TW_ACCESS_SECRET'],
                  os.environ['TW_CONSUMER_KEY'],
                  os.environ['TW_CONSUMER_SECRET'])
    SHORTE_ST_TOKEN = os.environ['SHORTE_ST_TOKEN']
except KeyError:  # For local runs.
    with open('.env', 'r') as secret:
        exec(secret.read())
        OAUTH = OAuth(ACCESS_TOKEN, ACCESS_SECRET, CONSUMER_KEY,
                      CONSUMER_SECRET)

ACCOUNT_HANDLER = Twitter(auth=OAUTH)
STREAM_HANDLER = TwitterStream(auth=OAUTH)


def main():
    """Main function to handle different activites of the account."""

    streamer = managers.StreamThread(
        STREAM_HANDLER, ACCOUNT_HANDLER)  # For the troubling part.
    account_manager = managers.AccountThread(
        ACCOUNT_HANDLER)  # For retweets, likes, follows.
    streamer.start()
    account_manager.run()


# Execute the main() function only if script is executed directly.
Code example #56
                    required=False,
                    type=int,
                    default=TELEGRAM_TWITTER_USER_ID)
args = parser.parse_args()

consumer_key = args.consumer_key
consumer_secret = args.consumer_secret
access_token = args.access_token
access_token_secret = args.access_token_secret
bot_token = args.bot_token
user_id = args.user_id

if not user_id:
    logger.warning('user_id not set, you will not be able to tweet')

twitter = Twitter(consumer_key, consumer_secret, access_token,
                  access_token_secret)


def error_handler(update: Update, context: CallbackContext):
    try:
        raise context.error
    except TelegramError as e:
        update.message.reply_text(e.message)
        logger.exception(e)
    except Exception as e:
        update.message.reply_text(e.message)
        logger.exception(e)


def start(update: Update, context: CallbackContext) -> None:
    """Send a message when the command /start is issued."""
Code example #57
def get_tweet(tweet_id):
    """Looks up data for a single tweet."""

    twitter = Twitter()
    return twitter.get_tweet(tweet_id)
Code example #58
import re
import unicodedata
from twitter import OAuth, Twitter
import numpy as np
import pandas as pd
import arrow
from . import templates, plots
from loonathetrends.utils import get_video_title_lookup, get_video_ismv_lookup

auth = OAuth(
    os.environ["TWITTER_ACCESSTOKEN"],
    os.environ["TWITTER_ACCESSSECRET"],
    os.environ["TWITTER_CONSUMERKEY"],
    os.environ["TWITTER_CONSUMERSECRET"],
)
t = Twitter(auth=auth)
t_upload = Twitter(domain="upload.twitter.com", auth=auth)

MILESTONES = {
    100_000: "100k",
    200_000: "200k",
    500_000: "500k",
    1_000_000: "1M",
    2_000_000: "2M",
    5_000_000: "5M",
    10_000_000: "10M",
    20_000_000: "20M",
    50_000_000: "50M",
    100_000_000: "100M",
}
Code example #59
File: unblockercli.py Project: webcoderz/UNBLOCK.PY
#CONSUMER_KEY = int(CONSUMER_KEY)
#CONSUMER_SECRET = int(CONSUMER_SECRET)
#ACCESS_KEY = int(ACCESS_KEY)
#ACCESS_SECRET = int(ACCESS_SECRET)

# debugging purposes.
print(CONSUMER_KEY)
print(CONSUMER_SECRET)
print(ACCESS_KEY)
print(ACCESS_SECRET)

# Inside this call I'm not exactly sure whether these can be passed as strings or as ints, so for ease of use
# they are set as strings; if that fails, handle it as necessary.
api = Twitter(auth=OAuth(CONSUMER_KEY,
                        CONSUMER_SECRET,
                        ACCESS_KEY,
                        ACCESS_SECRET,
                        tweet_mode='extended',
                        sleep_on_rate_limit=True))


def get_blocks(filename):
    with open(filename, 'w+') as f:
        blocks = api.GetBlocksIDs()
        f.write(json.dumps(blocks))
    return True


def unblock(blocklist):
    with open(blocklist, 'r') as f:
        blocks = json.loads(f.read())
    while blocks:
Code example #60
    try:
        d = {'"': '\\"', "'": "\\'", "\0": "\\\0", "\\": "\\\\"}
        return ''.join(d.get(c, c) for c in s)
    except:
        return s


if __name__ == '__main__':
    # ts = TwitterStream(auth=auth, domain='userstream.twitter.com')
    # for msg in ts.user(tweet_mode='extended'):
    #     # print_log(json.dumps(msg, indent=4))
    #     tweete = analyze_twitter(msg)
    #     if tweete:
    #         save_twitter(session, [tweete, ])

    t = Twitter(auth=auth)

    while True:
        print_log('start work...')
        messages = t.statuses.home_timeline(tweet_mode='extended', count=200)
        to_insert = []

        for msg in messages:
            # print_log(json.dumps(msg, indent=4))
            to_insert.append(analyze_twitter(msg))

        to_insert = filter_twitters(local_session, to_insert)

        save_twitter(local_session, to_insert)
        print_log('{} twitters saved'.format(len(to_insert)))
        break