Beispiel #1
0
def lambda_handler(event, context):
    """AWS Lambda entry point: DM a lock-the-door reminder to the user
    named in the event payload."""
    # The four OAuth secrets ship with the deployment package.
    with open("credentials.json", "r") as creds_file:
        creds = json.load(creds_file)

        api = TwitterAPI(consumer_key=creds["oauth_consumer_key"],
                         consumer_secret=creds["oauth_consumer_secret"],
                         access_token_key=creds["oauth_token"],
                         access_token_secret=creds["oauth_token_secret"])

        recipient = event.get('payload')
        reminder = "Remember to LOCK the door!"

        # Direct-message body following Twitter's message_create schema.
        # (Rebinds the `event` name, mirroring the original code.)
        event = {
            "event": {
                "type": "message_create",
                "message_create": {
                    "target": {
                        "recipient_id": recipient
                    },
                    "message_data": {
                        "text": reminder
                    }
                }
            }
        }

    send_msg = api.request('direct_messages/events/new', json.dumps(event))

    print('SUCCESS' if send_msg.status_code == 200 else 'PROBLEM: ' +
          send_msg.text)
def main():
    """Resume downloading a user's timeline into a YAML file.

    Continues below the ``max_id`` checkpoint kept in the data store,
    appends each tweet to ``user_timeline/<name>.yaml``, and saves the new
    checkpoint afterwards (also on Ctrl-C, so partial runs resume cleanly).
    """
    app_settings = settings.get_app_settings()
    api = TwitterAPI.TwitterAPI(**app_settings)

    store_data = store.get_data()

    search_term = 'vnnlp'
    query = {'screen_name': search_term}

    filename = 'user_timeline/{}.yaml'.format(search_term)

    utils.ensure_dir(filename)

    # Resume strictly below the last checkpointed tweet id, if one exists.
    if 'user_timeline' in store_data and 'max_id' in store_data[
            'user_timeline']:
        query['max_id'] = store_data['user_timeline']['max_id'] - 1

    max_id = None
    try:
        with open(filename, 'a') as output_file:
            r = TwitterAPI.TwitterPager(api, 'statuses/user_timeline', query)

            for tweet in r.get_iterator():
                yaml.dump([tweet], output_file, default_flow_style=False)
                if 'id' in tweet:
                    max_id = tweet['id']

    except KeyboardInterrupt:
        # Interrupted runs still checkpoint whatever was fetched.
        pass

    # setdefault replaces the `not 'user_timeline' in store_data` anti-idiom.
    store_data.setdefault('user_timeline', {})['max_id'] = max_id
    store.store_data(store_data)
Beispiel #3
0
def send_tweet(message_to_tweet: str, url_to_media: str) -> int:
    """
    Send a tweet with an image.
    :param message_to_tweet: Message to send
    :param url_to_media: Path of the image to upload (None means no image)
    :return: Tweet ID
    :raises Exception: No image was attached, or the tweet failed to send
    """
    twitter_api = TwitterAPI.TwitterAPI(
        config.get('twitter', 'CONSUMER_KEY'),
        config.get('twitter', 'CONSUMER_SECRET'),
        config.get('twitter', 'ACCESS_TOKEN_KEY'),
        config.get('twitter', 'ACCESS_TOKEN_SECRET'),
    )

    logging.info('Tweet to send: {}'.format(message_to_tweet))
    try:
        if url_to_media is not None:
            resize_image(url_to_media)
            # Context manager closes the image handle even when the upload
            # raises (the original leaked it).
            with open(url_to_media, 'rb') as photo:
                status = twitter_api.request('statuses/update_with_media',
                                             {'status': message_to_tweet},
                                             {'media[]': photo})
            logging.info('Twitter Status Code: {}'.format(status.status_code))

            response = TwitterAPI.TwitterResponse(status, False).json()
            logging.info('Twitter Response Parsed: {}'.format(response))
            return int(response['id_str'])
        raise Exception("No image attached to tweet")
    except UnicodeDecodeError:
        logging.exception(
            'Your message could not be encoded. Perhaps it contains non-ASCII characters?'
        )
        raise Exception("Tweet failed to send")
Beispiel #4
0
def get_results() -> Tuple[List[Dict[str, str]], List[Dict[str, str]]]:
    """
    Get the results from the competition and print it out
    :return: Winner's name and their query
    """
    twitter_api = TwitterAPI.TwitterAPI(
        config.get('twitter', 'CONSUMER_KEY'),
        config.get('twitter', 'CONSUMER_SECRET'),
        config.get('twitter', 'ACCESS_TOKEN_KEY'),
        config.get('twitter', 'ACCESS_TOKEN_SECRET'),
    )

    valid_normal_entries: List[Dict[str, Any]] = []
    valid_regex_entries: List[Dict[str, Any]] = []

    logging.info('GET RESULTS')

    # Only consider mentions newer than the most recent challenge tweet.
    json_db: Dict[str, Any] = load_json_db(
        config.get('scryfallCardGolf', 'TWEET_DATABASE'))
    max_key: str = max(json_db.keys())

    pager = TwitterAPI.TwitterPager(twitter_api, 'statuses/mentions_timeline', {
        'count': 200,
        'since_id': json_db[max_key]['tweet_id']
    })

    for mention in pager.get_iterator():
        # A payload without 'text' is an API control/error message.
        if 'text' not in mention:
            logging.warning('SUSPEND, RATE LIMIT EXCEEDED: ' + mention['message'])
            break

        author = mention['user']['screen_name']
        logging.info('[TWEET] ' + author + ': ' +
                     mention['text'])
        for url_entry in mention['entities']['urls']:
            submitted_url = url_entry['expanded_url']
            # Only Scryfall links count as submissions.
            if 'scryfall.com' not in submitted_url:
                continue

            logging.info('{} submitted solution: {}'.format(
                author, submitted_url))
            test_query_results = test_query(author, submitted_url)
            if not test_query_results:
                continue

            entry: Dict[str, Any] = {
                'name': author,
                'length': len(test_query_results),
                'query': test_query_results
            }

            # Regex-based queries compete in their own bracket.
            if re.search(r'/.+/', test_query_results):
                valid_regex_entries.append(entry)
            else:
                valid_normal_entries.append(entry)

    return valid_normal_entries, valid_regex_entries
        def tweet_download_and_lang_detect(df_tweets, user_id,
                                           update_to_invalid_list):
            """
            Calls language detection and checks if enough German tweets remain.
            If it found almost enough German tweets it will load more.
            If it found almost none it will abort.
            :param df_tweets: 0 during the first run, dataframe with tweets during later runs
            :param user_id: Twitter user ID for tweet download and language check
            :param update_to_invalid_list: list of users that cannot be downloaded from; appended to if applicable
            :return: df_tweets, update_to_invalid_list, abort_loop, len_df
            """
            # First run: df_tweets is the integer sentinel 0 — download a
            # fresh page and language-filter it.
            if isinstance(df_tweets, int):
                df_tweets = TwitterAPI.API_tweet_multitool(
                    user_id,
                    'temp',
                    pages=1,
                    method='user_timeline',
                    append=False,
                    write_to_db=False)  # fills DF with 200 tweets of 1 page
                df_tweets = helper_functions.lang_detect(df_tweets)
            else:
                # Later runs: download another page and append it to what we
                # already have.
                df_tweets_additions = TwitterAPI.API_tweet_multitool(
                    user_id,
                    'temp',
                    pages=1,
                    method='user_timeline',
                    append=False,
                    write_to_db=False)  # fills DF with 200 tweets of 1 page
                df_tweets_additions = helper_functions.lang_detect(
                    df_tweets_additions)
                # lang_detect may return a non-DataFrame when nothing survives
                # the filter — only concat real DataFrames.
                if isinstance(df_tweets_additions, pd.DataFrame):
                    df_tweets = pd.concat([df_tweets, df_tweets_additions])
                    df_tweets.reset_index(inplace=True)
                    del df_tweets['index']

            len_df = helper_functions.dataframe_length(df_tweets)
            if len_df <= 50:
                # Almost no German tweets: blacklist this user instead of
                # burning more page loads on them.
                update_to_invalid_list.append(user_id)
                abort_loop = True
            elif len_df >= 200:
                # Enough tweets collected — stop loading.
                abort_loop = True
            else:
                # Too few German tweets so far: load more for a better result.
                abort_loop = False
            gc.collect()
            return df_tweets, update_to_invalid_list, abort_loop, len_df
 def check_user_info(self):
     """
     Look up this object's ``username`` on Twitter and persist the profile.

     On a successful lookup the last profile in the response is saved via
     TwitterUserDA; otherwise a placeholder TwitterUser is created in
     memory only.
     :return: None
     """
     # Connect to the API using module-level credentials.
     api = Tapi.TwitterAPI(cons_key, cons_secret, token, token_secret)
     # Issue the lookup request.
     # NOTE(review): 'Name' does not look like a users/lookup parameter —
     # probably only 'screen_name' is needed; confirm against the API docs.
     out = api.request('users/lookup', {'Name': '{}'.format(self.username), 'screen_name': '{}'.format(self.username)})
     if out is not None:
         # Iterating keeps only the LAST item of the response.
         # NOTE(review): if the response is empty, `info` is never bound and
         # the field lookups below raise UnboundLocalError — verify upstream.
         for i in out:
             info = i
         following = info['friends_count']
         followers = info['followers_count']
         join_date = info['created_at']
         location = info['location']
         description = info['description']
         user_id = info['id_str']
         # Build the domain object from the fetched profile fields.
         self.user = TwitterUser(username=info['screen_name'], location=location, description=description,
                                 date_joined=join_date, following=following, followers=followers,id=user_id)
         # Persist this user.
         TwitterUserDA().save_user(self.user)
     else:
         # Fallback: create a manually-flagged placeholder (not persisted).
         self.user = TwitterUser(username=self.username, location="", description="*manually created*",
                                 date_joined="Joined June 2009", following='', followers='')
Beispiel #7
0
 def __init__(self, twitter_creds, host, port):
     """Open the Twitter API and serve streamed tweets to TCP clients.

     :param twitter_creds: dict with consumer/access key and secret entries
     :param host: interface to bind the listening socket to
     :param port: TCP port to listen on

     NOTE(review): Python 2 syntax (``print host, port``). The accept loop
     below runs forever, handling one client connection at a time.
     """
     self.api = TwitterAPI(twitter_creds["consumer_key"],
                           twitter_creds["consumer_secret"],
                           twitter_creds["access_token_key"],
                           twitter_creds["access_token_secret"])
     self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
     self.s.bind((host, port))
     print host, port
     self.s.listen(5)
     # Accept clients one at a time; any exception is printed and the loop
     # continues with the next connection.
     while True:
         try:
             c, addr = self.s.accept()
             self.c = c
             self.stream()
         except Exception as e:
             print e
Beispiel #8
0
 def searchTwitterAPI(self):
     """Search Twitter for the film entered in the UI and cache the tweets.

     Reads the film title and release date from the entry widgets, remembers
     them in filmTitle.txt, then either reuses cached tweets for this week
     or downloads fresh ones into the per-film DB table.
     """
     query = str(self.filmEntry.get())
     releaseDate = str(self.rdEntry.get())
     # Table names cannot contain spaces.
     dbTableName = str(query.replace(" ", ""))
     # Persist title and release date for the results page to read back.
     filmTitleFile = open("filmTitle.txt", "w")
     filmTitleFile.write(str(dbTableName) + "\n")
     filmTitleFile.write(releaseDate)
     filmTitleFile.close()
     api = TwitterAPI.TwitterClient()
     weekNo = api.getWeekNO(releaseDate)
     tableExists = api.createDBTable(dbTableName)
     if tableExists:
         # Check whether tweets for this week number are already cached; if
         # so just show the results, otherwise fetch and store new tweets.
         # NOTE(review): the table name is concatenated into the SQL string
         # and comes from user input — SQL injection risk; validate or
         # whitelist dbTableName before shipping this.
         cur.execute(
             """SELECT COUNT(*) FROM """ + str(dbTableName) +
             """ WHERE weekNo=?""", (weekNo, ))
         if cur.fetchone()[0] >= 1:
             self.goToResults()
         else:
             tweets = api.getTweets(query)
             api.populateDB(dbTableName, tweets, weekNo)
             api.populateReultsTable(dbTableName, weekNo)
             self.goToResults()
     else:
         # No table yet: create it and fill it with freshly fetched tweets.
         tweets = api.getTweets(query)
         api.createDBTable(dbTableName)
         api.populateDB(dbTableName, tweets, weekNo)
         api.populateReultsTable(dbTableName, weekNo)
         self.goToResults()
Beispiel #9
0
 def test_paging_iterator(self):
     """A TwitterRestPager over a search endpoint yields a usable iterator."""
     params = {'q': 'pizza'}
     pager = TwitterAPI.TwitterRestPager(self.api, 'search/tweets', params)
     self.assertIsInstance(pager, TwitterAPI.TwitterRestPager)
     self.use_iterator(pager.get_iterator())
Beispiel #10
0
 def populateResultsPage(self):
     """Compute sentiment ratios for the cached film and fill the result labels.

     Reads the film table name and release date saved by the search page,
     derives polarity and subjectivity ratios for the current week, and the
     week-over-week change where the previous week has data.
     """
     file = open("filmTitle.txt", "r")
     lines = file.readlines()
     rdate = lines[1].strip()
     tableName = lines[0].strip()
     api = TwitterAPI.TwitterClient()
     weekNO = api.getWeekNO(rdate)
     # NOTE(review): these divisions are NOT inside the try below — a week
     # with zero negative or zero neutral tweets raises ZeroDivisionError
     # here; consider guarding them the same way as the change calculation.
     polarity = api.countPostiveTweets(
         tableName, weekNO) / api.countNegativeTweets(tableName, weekNO)
     subjectivity = (api.countPostiveTweets(tableName, weekNO) +
                     api.countNegativeTweets(tableName, weekNO)
                     ) / api.countNeutralTweets(tableName, weekNO)
     self.polarityResult.config(text=str(polarity))
     self.subjectivityResult.config(text=str(subjectivity))
     self.weekResult.config(text=str(weekNO))
     # Now compute the change in polarity and subjectivity vs. last week.
     try:
         polarityChange = polarity - (
             api.countPostiveTweets(tableName, (weekNO - 1)) /
             api.countNegativeTweets(tableName, (weekNO - 1)))
         subjectivityChange = subjectivity - (
             (api.countPostiveTweets(tableName, (weekNO - 1)) +
              api.countNegativeTweets(tableName, (weekNO - 1))) /
             api.countNeutralTweets(tableName, (weekNO - 1)))
         # Update the change labels with the new values.
         self.polarityCResult.config(text=str(polarityChange))
         self.subjectivityCResult.config(text=str(subjectivityChange))
     except ZeroDivisionError:
         # No data for the previous week — show N/A instead of crashing.
         self.polarityCResult.config(text="N/A")
         self.subjectivityCResult.config(text="N/A")
def main():
    """Fetch the followers list for a single hard-coded account."""
    api = TwitterAPI.TwitterAPI(**settings.get_app_settings())

    search_term = 'dongng'
    twitter.fetch_one(api, 'followers/list', search_term,
                      {'screen_name': search_term})
Beispiel #12
0
    def setUp(self):
        """Create an authenticated API client for the tests.

        Credentials are read from TwitterAPI/credentials.txt; you must copy
        your own credentials into that text file first.
        """
        oa = TwitterAPI.TwitterOAuth.read_file()
        self.api = TwitterAPI.TwitterAPI(oa.consumer_key, oa.consumer_secret,
                                         oa.access_token_key,
                                         oa.access_token_secret)
Beispiel #13
0
def tweetImage(message, image_file):
    """Post *message* with the image at path *image_file* attached.

    :param message: status text for the tweet
    :param image_file: path of the image to upload
    :return: 'SUCCESS' on HTTP 200, otherwise 'FAILURE'
    """
    api = ta.TwitterAPI(consumer_key, consumer_secret, access_token_key,
                        access_token_secret)
    # Context manager closes the handle (the original leaked it) and avoids
    # shadowing the `file` name.
    with open(image_file, 'rb') as image:
        data = image.read()
    r = api.request('statuses/update_with_media', {'status': message},
                    {'media[]': data})
    return 'SUCCESS' if r.status_code == 200 else 'FAILURE'
Beispiel #14
0
def main():
    """Search tweets for a hard-coded term and save the results."""
    api = TwitterAPI.TwitterAPI(**settings.get_app_settings())

    search_term = 'dongng'
    twitter.fetch(api, 'search/tweets', search_term, {'q': search_term})
Beispiel #15
0
    def __init__(self, tweet_count=200, twitter_account='stackdevjobs'):
        """Configure the scraper and open an app-auth (oAuth2) API session."""
        self.tweet_count = tweet_count
        self.twitter_account = twitter_account

        # App-only auth: no access token, just the consumer credentials.
        self.api = TwitterAPI.TwitterAPI(self.consumer_key,
                                         self.consumer_secret,
                                         auth_type='oAuth2')
Beispiel #16
0
def main():
    """Fetch Vietnamese tweets for the current search term into 'input'."""
    api = TwitterAPI.TwitterAPI(**settings.get_app_settings())

    # Terms used in earlier runs: 'coccoc', '#coccoc', 'cntt', 'ifan', 'yêu'.
    query = {'q': 'ghét', 'lang': 'vi'}
    twitter.fetch(api, 'search/tweets', 'input', query)
Beispiel #17
0
def test_app_normal(event, mocker):
    """A 200 response from Twitter yields the dummy message id."""
    response = requests.Response()
    response.status_code = 200
    response._content = '{}'.encode()
    stub = TwitterAPI.TwitterResponse(response, None)
    mocker.patch('TwitterAPI.TwitterAPI.request', return_value=stub)

    assert app.handle(event, config) == {'MessageIds': ['dummy']}
Beispiel #18
0
def test_app_abnormal(event, mocker):
    """An authorization error (HTTP 401) makes the handler raise."""
    response = requests.Response()
    response.status_code = 401  # authorization error
    response._content = '{}'.encode()
    stub = TwitterAPI.TwitterResponse(response, None)
    mocker.patch('TwitterAPI.TwitterAPI.request', return_value=stub)

    with pytest.raises(Exception):
        app.handle(event, config)
Beispiel #19
0
def test_app_normal(event, mocker):
    """A 200 response carrying an id_str surfaces it in the results."""
    response = requests.Response()
    response.status_code = 200
    response._content = '{"id_str": "12345"}'.encode()
    stub = TwitterAPI.TwitterResponse(response, None)
    mocker.patch('TwitterAPI.TwitterAPI.request', return_value=stub)

    assert app.handle(event, config) == {'Results': {'dummy': '12345'}}
Beispiel #20
0
def test_app_already_retweeted(event, mocker):
    """A 403 duplicate-status error (code 187) maps to a None result."""
    response = requests.Response()
    response.status_code = 403
    duplicate = {"errors": [{"code": 187, "message": "Status is a duplicate."}]}
    response._content = json.dumps(duplicate).encode()
    stub = TwitterAPI.TwitterResponse(response, None)
    mocker.patch('TwitterAPI.TwitterAPI.request', return_value=stub)

    assert app.handle(event, config) == {'Results': {'dummy': None}}
Beispiel #21
0
def connect_to_streaming_API():
    ''' Connect to twitter streaming API
        Returns an iterator with all the stream data.

        NOTE(review): the OAuth credentials below are hard-coded (and now
        published) — revoke them and load credentials from environment
        variables or a config file instead.
    '''

    api = TwitterAPI.TwitterAPI(
        'f8olfZWtAPvgANdP9qecg', 'bSEnCXJuWazjT8S8hZ6BLWMo1C7egIKNgjObHM6Ck',
        '1726636778-jEn4qUAj2wV60ckbskNSbLJgTRr0c7hiemVOU7x',
        'UgwEfM3cukoWIxCWjCiIZiJ0gnQVGH9U42WLfJjnEFODw')

    # Filter the public stream for the '#rip' hashtag.
    r = api.request('statuses/filter', {'track': '#rip'})

    return r.get_iterator()
Beispiel #22
0
    def getTweetCount(q, p=False, debug=False):
        """Count tweets matching *q* that were posted yesterday (UTC).

        :param q: search query string
        :param p: when True, print each matching tweet's timestamp
        :param debug: when True, print elapsed wall-clock time
        :return: number of matching tweets created yesterday
        """
        # One client is enough — the original built it twice and also
        # created an unused tweepy OAuth handler.
        api = TwitterAPI.TwitterAPI(consumer_key, consumer_secret,
                                    access_token, access_token_secret)

        # Today's UTC date, used to recognise "yesterday" in timestamps.
        a = time.time()
        timeStamp = datetime.datetime.utcnow().date()
        tDay = timeStamp.day

        count = 0
        r = TwitterPager(api, 'search/tweets', {'q': q, 'count': 100})
        for item in r.get_iterator(wait=6):
            # Rate-limit notices carry no 'created_at'; check them FIRST so
            # we don't KeyError below (the original checked too late).
            if 'message' in item and item['code'] == 88:
                print('SUSPEND, RATE LIMIT EXCEEDED: %s' % item['message'])
                break
            time_stamp = item['created_at']
            day = int(time_stamp[8:10])  # day-of-month within "... Jan 05 ..."
            if (tDay != day and tDay - 1 != day):
                # Results are newest-first; anything older than yesterday
                # means we are done.
                break
            if (tDay - 1 == day):
                count += 1
                if (p):
                    print("Term: " + q + " on " + item["created_at"])

        if (debug):
            b = time.time()
            c = int(b - a)
            print("\nThis took " + str(round(c / 60, 2)) + " minutes")

        return count


#res = tweet.getTweetCount("qwertyuiop", False,True)
#print(res)
 def __init__(self):
     """Load Twitter credentials from disk and build the API client."""
     self.name = 'TwitterApi'
     # `with` closes the key file promptly — the original leaked the handle
     # via open(...).read().
     with open(twitter_key_path, 'r') as key_file:
         self.keys_dict = json.loads(key_file.read())
     self.twitter_api = twitter.TwitterAPI(
         consumer_key=self.keys_dict['consumer_key'],
         consumer_secret=self.keys_dict['consumer_secret'],
         access_token_key=self.keys_dict['access_token_key'],
         access_token_secret=self.keys_dict['access_token_secret'])
     # Result types to query; popular/recent are currently disabled.
     self.querying_types = [
         # TwitterApi.ResultType.popular,
         # TwitterApi.ResultType.recent,
         TwitterApi.ResultType.mixed
     ]
def search_users(query):
    """Run a users/search query and return the most relevant screen names.

    :param query: free-text search term
    :return: list of screen_name strings (up to nb_results)

    NOTE(review): Python 2 syntax (``print user_list``). The credentials
    below are hard-coded and published — revoke them and load credentials
    from configuration instead.
    """
    # users/search needs a user-context (OAuth1) connection.
    consumer_key = 'ZuFJRql4R1wLe3vOP9IcD8B5t'
    consumer_secret = 'XtrinZVQbvHYCcc0IpHDG7NA4DuocdjHIdQ1Dt7u5w8vkxhonk'
    access_token_key = '137058679-CVBZBXfDCyox60tWZVbHcHK1cSWROXzxjzRfdO6s'
    access_token_secret = 'yS2Zb2g5pD4QuUbgYRFt966xPqfmB3AZzlbfeKd0Nrbyo'
    api2 = TwitterAPI(consumer_key,
                 consumer_secret,
                 access_token_key,
                 access_token_secret)
    page = 1
    nb_results = 10
    # r = api2.request('users/search', {'q':query, 'count':nb_results }).json()
    r = api2.request('users/search', {'q':query, 'page':page, 'count':nb_results }).json()
    
    user_list = []
    for item in r:
        user_list.append(item['screen_name'])

    print user_list
    return user_list
Beispiel #25
0
def test_app_already_retweeted(event, mocker):
    """A 403 already-retweeted error (code 327) still reports the message id."""
    response = requests.Response()
    response.status_code = 403
    already_retweeted = {
        "errors": [{
            "code": 327,
            "message": "You have already retweeted this Tweet."
        }]
    }
    response._content = json.dumps(already_retweeted).encode()
    stub = TwitterAPI.TwitterResponse(response, None)
    mocker.patch('TwitterAPI.TwitterAPI.request', return_value=stub)

    assert app.handle(event, config) == {'MessageIds': ['dummy']}
def download_user_timelines(political_moderate_list: list,
                            right_wing_populists_list: list):
    """
    Downloads user timelines of users featured in below lists. The downloads are used as training material for AI
    training. All lists are just incomplete examples.
    :param political_moderate_list: screen names labelled as politically moderate
    :param right_wing_populists_list: screen names labelled as right-wing populist
    :return: None (tweets are written to the database by API_tweet_multitool)
    """

    # List examples
    # political_moderate_list = ['_pik_dame_', 'Leekleinkunst', 'MartinaKraus7', 'KAFVKA', 'Volksverpetzer', 'insideX',
    #                            'FranziLucke', 'leonie_stella9', 'Ute631', 'justMPO', 'anouk_avf', 'Komisaar',
    #                            'MenschBernd', 'von_ems', 'lies_das', 'seewanda', 'Rene_Bacher', 'Prasanita93',
    #                            'IgorUllrich', 'AJSalzgitter', 'Bussi72', 'HuWutze', 'strahlgewitter', 'PhilKupi',
    #                            'BaldusEla', 'LarsKlingenberg', 'MichaelSchfer71', 'EddyAges', 'veripot', 'JoernPL',
    #                            'ondreka', 'kleinerJedi', 'DanielKinski', 'wfh7175', 'Sister_records1', 'TinaJergerkamp']
    # right_wing_populists_list = ['Junge_Freiheit', 'zaferflocken', 'HelmutWachler', 'M_Briefing', 'TinVonWo', 'mcwd12',
    #                              'EBlume3', 'h_hendrich']

    # The political/unpolitical stance is currently not used.
    # Tweets of below accounts will be downloaded from twitter. During model a subset of below accounts might be used.
    # unpolitical_list = ['Podolski10', 'fckoeln', 'FCBayern', 'BVB', 'rtl2', 'DMAX_TV', 'tim_kocht', 'grandcheflafer',
    #                     'bildderfrau', 'gala', 'BUNTE', 'promiflash', 'funny_catvideos', 'BibisBeauty', 'dagibee',
    #                     'siggismallz', 'Gronkh', 'CHIP_online', 'COMPUTERWOCHE', 'SkySportNewsHD', 'MOpdenhoevel',
    #                     'kayefofficial', 'VOGUE_Germany', 'lucycatofficial', 'RealLexyRoxx', 'AnselmSchindler',
    #                     'pentru_tine', 'KaJa80028344']

    # unpolitical_list = ['Podolski10']  # For testing

    # political_list = ['Thomas_Ehrhorn', 'HilseMdb', 'DirkSpaniel', 'MdB_Lucassen', 'RolandTichy', 'UllmannMdB',
    #                   'c_jung77', 'michael_g_link', 'theliberalfrank', 'IreneMihalic', 'KaiGehring', 'RenateKuenast',
    #                   'GoeringEckardt', 'MdB_Freihold', 'ZaklinNastic', 'PetraPauMaHe', 'lgbeutin', 'arnoklare',
    #                   'zierke', 'Timon_Gremmels', 'Johann_Saathoff', 'uhl_markus', 'AnjaKarliczek', 'KLeikert',
    #                   'Junge_Gruppe']

    # Map list label -> screen names, so the label is stored with the tweets.
    user_lists = {
        'political_moderate_list': political_moderate_list,
        'right_wing_populists_list': right_wing_populists_list
    }

    # Download 10 pages of each user's timeline into the database.
    for list_name, username_list in user_lists.items():
        for element in username_list:
            TwitterAPI.API_tweet_multitool(element,
                                           list_name,
                                           pages=10,
                                           method='user_timeline',
                                           append=True,
                                           write_to_db=True)
def fetch(api, resource, search_term, query):
    """Page through *resource* for *query* and dump all results to YAML."""
    filename = '{0}/{1}.yaml'.format(resource.replace('/', '_'), search_term)

    utils.ensure_dir(filename)

    collected = []
    try:
        pager = TwitterAPI.TwitterPager(api, resource, query)
        for tweet in pager.get_iterator():
            collected.append(tweet)

    except KeyboardInterrupt:
        # Whatever was fetched so far is still written below.
        pass

    with open(filename, 'w') as output_file:
        yaml.dump(collected, output_file, default_flow_style=False)
def get_user_tweet_replies(api, screen_name):
    """Collect tweets replying to *screen_name* and dump them to a YAML file."""
    pager = TwitterAPI.TwitterPager(api, 'search/tweets', {'q': screen_name})

    replies = []
    for result in pager.get_iterator():
        # Keep only genuine replies directed at the target account.
        is_reply = ('in_reply_to_screen_name' in result
                    and result['in_reply_to_screen_name'] == screen_name)
        if is_reply:
            replies.append(result)

    filename = f'reply_tweets/{screen_name}.yaml'
    utils.ensure_dir(filename)

    with open(filename, 'w') as output_file:
        yaml.dump(replies, output_file, default_flow_style=False)
Beispiel #29
0
def get_results() -> List[Dict[str, Any]]:
    """
    Get the results from the competition and print it out
    :return: Winner's name and their query
    """
    valid_entries: List[Dict[str, Any]] = []

    logging.info('GET RESULTS')

    # Only consider mentions newer than the most recent challenge tweet.
    json_db: Dict[str, Any] = load_json_db(TWEET_DATABASE)
    max_key: str = max(json_db.keys())

    r = TwitterAPI.TwitterPager(twitter_api, 'statuses/mentions_timeline', {
        'count': 200,
        'since_id': json_db[max_key]['tweet_id']
    })

    for item in r.get_iterator():
        # A payload without 'text' is an API control/error message.
        if 'text' not in item:
            logging.warning('SUSPEND, RATE LIMIT EXCEEDED: %s\n' %
                            item['message'])
            break

        logging.info('[TWEET] ' + item['user']['screen_name'] + ': ' +
                     item['text'])
        for url in item['entities']['urls']:
            test_url = url['expanded_url']
            # Only Scryfall links count as submissions.
            if 'scryfall.com' not in test_url:
                continue

            logging.info('{} submitted solution: {}'.format(
                item['user']['screen_name'], test_url))
            test_query_results = test_query(item['user']['screen_name'],
                                            test_url)
            # Record every entrant whose query validated.
            if test_query_results:
                valid_entries.append({
                    'name': item['user']['screen_name'],
                    'length': len(test_query_results),
                    'query': test_query_results
                })

    return valid_entries
Beispiel #30
0
class Twitter:
    """Twitter API abstraction class"""

    CONSUMER_KEY = ""
    CONSUMER_SECRET = ""
    ACCESS_TOKEN = ""
    ACCESS_TOKEN_SECRET = ""
    test = True

    def __init__(self, consumer_key, consumer_secret, access_token, access_token_secret, test):
        self.CONSUMER_KEY = consumer_key
        self.CONSUMER_SECRET = consumer_secret
        self.ACCESS_TOKEN = access_token
        self.ACCESS_TOKEN_SECRET = access_token_secret
        self.test = test

    def auth(self):
        """Create the authenticated (user-context OAuth1) API client."""
        self.api = TwitterAPI(self.CONSUMER_KEY, self.CONSUMER_SECRET, self.ACCESS_TOKEN, self.ACCESS_TOKEN_SECRET)

    def timeline(self, screen_name):
        """Return an iterator over a user's tweets, or None if paging fails.

        In test mode the standard user_timeline endpoint is used; otherwise
        the premium 30-day search endpoint.
        """
        try:
            if self.test:
                response = TwitterPager(self.api,
                                        "statuses/user_timeline",
                                        {"screen_name": screen_name,
                                         "count": 200,
                                         "include_rts": True,
                                         "trim_user": True})
            else:
                response = TwitterPager(self.api,
                                        'tweets/search/%s/:%s' % ("30day", "test"),
                                        {'query': 'from:' + screen_name})
            return response.get_iterator()
        except Exception as e:
            # The original fell through to `response.get_iterator(wait=5)`
            # after the except, which raised UnboundLocalError whenever the
            # pager failed to build. Report the error and return None.
            print(e)
            return None

    def tweet(self, id):
        """Fetch a single tweet by numeric id."""
        return self.api.request('statuses/show/:%d' % id)

    def close(self):
        """No resources to release; kept for interface symmetry."""
        return None
def fetch(api, resource, search_term, query):
    """Stream tweets for *query*, appending non-retweet texts to a YAML file.

    Skips retweets and auto-generated YouTube-activity posts; Ctrl-C stops
    cleanly with everything written so far kept on disk.
    """
    filename = '{0}/{1}.yaml'.format(resource.replace('/', '_'), search_term)

    utils.ensure_dir(filename)
    count = 0
    # Auto-generated YouTube-activity phrases to filter out.
    skip_markers = ("Tôi đã thích video", "Tôi đã thêm video")
    try:
        with open(filename, 'a') as output_file:
            r = TwitterAPI.TwitterPager(api, resource, query)

            for tweet in r.get_iterator():
                # BUG FIX: the original tested `("A" or "B") in text`, which
                # evaluates to "A" and never checked the second phrase.
                if 'retweeted_status' not in tweet and not any(
                        marker in tweet['text'] for marker in skip_markers):
                    yaml.dump([tweet['text']],
                              output_file,
                              default_flow_style=False)
                    count += 1
                    print(count, tweet['text'])
    except KeyboardInterrupt:
        pass
Beispiel #32
0
def postTweet(apiKey, apiSecret, accessToken, accessTokenSecret, text):
    """Post *text* as a status update and print whether it succeeded."""
    client = TwitterAPI(apiKey, apiSecret, accessToken, accessTokenSecret)

    response = client.request('statuses/update', {'status': text})

    outcome = 'SUCCESS' if response.status_code == 200 else 'FAILURE'
    print(outcome)
Beispiel #33
0
def streamTweets(apiKey, apiSecret, accessToken, accessTokenSecret,
                 trackTerm, lang='-', nTweets=100, nFlush=100, media='-'):
    """Stream tweets matching *trackTerm* into the database.

    :param trackTerm: phrase to track on the statuses/filter stream
    :param lang: language code to keep, or '-' for any language
    :param nTweets: stop after this many accepted tweets
    :param nFlush: flush accumulated tweets to the DB every nFlush tweets
    :param media: True to keep only tweets with media, False only tweets
                  without, '-' to keep both
    """
    api = TwitterAPI(apiKey, apiSecret, accessToken, accessTokenSecret)

    # Counters and buffers.
    current = 0   # tweets accumulated since the last flush
    total = 0     # total accepted tweets
    stream = []   # buffered rows awaiting a DB flush

    # Field names for the dicts handed to the database layer.
    keys = ["name", "handle", "content", "time", "hashtags", "rtCount",
            "favCount", "media", "lang", "imgName"]

    # Open the filtered public stream.
    r = api.request('statuses/filter', {'track': trackTerm})

    for tweet in r.get_iterator():
        if 'text' in tweet:  # a real tweet, not a control message
            # Media filter: required / forbidden / don't care.
            if media != "-":
                cMedia = True if media == True else False
                if ('media' not in tweet['entities']) & (cMedia == True):
                    continue
                elif ('media' in tweet['entities']) & (cMedia == False):
                    continue
            else:
                cMedia = 'media' in tweet['entities']

            # Language filter: anything, or one specific language.
            if lang != "-":
                tLang = lang
                # BUG FIX: the original compared the 'metadata' dict to the
                # language string, which always differed and so dropped
                # every tweet whenever a language was requested.
                if tweet['lang'] != tLang:
                    continue
            else:
                tLang = tweet['lang']

            # Comma-separated hashtag list, or None if there are none.
            if tweet['entities']['hashtags']:
                hashtags = ""
                for tag in tweet['entities']['hashtags']:
                    hashtags = hashtags + tag['text'] + ", "
            else:
                hashtags = None

            # Media download/saving is currently disabled.
            fileName = None

            # Push the tweet onto the stream buffer.
            stream.append([tweet['user']['name'], tweet['user']['screen_name'],
                           tweet['text'].replace('\n', ' '), tweet['created_at'],
                           hashtags, tweet['retweet_count'], tweet['favorite_count'],
                           cMedia, tLang, fileName])

            current += 1
            total += 1

            # Flush every nFlush tweets to bound memory use.
            if current == nFlush:
                dictList = [dict(zip(keys, row)) for row in stream]
                print("Updating database. Total = ", total)
                dbTest.updateDB(dictList)
                stream = []         # empty the buffer
                current = 0         # reset the flush counter

            # Reached the requested number of tweets: flush and stop.
            if total >= nTweets:
                dictList = [dict(zip(keys, stream[i])) for i in range(current)]
                print("Updating database. Total = ", total)
                dbTest.updateDB(dictList)
                break
        # Rate-limit notice (code 88) from the API — stop streaming.
        # BUG FIX: the original referenced the undefined name `item` here.
        elif 'message' in tweet and tweet['code'] == 88:
            print('SUSPEND, RATE LIMIT EXCEEDED: %s' % tweet['message'])
            break

    print("Done Streaming!")
# Third-party Twitter client; the star-import brings TwitterAPI into scope.
from TwitterAPI import *
import requests
import os

# Twitter OAuth credentials are read from the environment.
# NOTE(review): os.environ[...] raises KeyError if a variable is unset —
# presumably intentional fail-fast behavior; confirm.
consumer_key = os.environ['TWITTER_CONSUMER_KEY']
consumer_secret = os.environ['TWITTER_SECRET']
access_token_key = os.environ['TWITTER_ACCESS_TOKEN']
access_token_secret = os.environ['TWITTER_ACCESS_TOKEN_SECRET']

# Authenticated client used by the streaming loop below.
api = TwitterAPI(consumer_key, consumer_secret, access_token_key, access_token_secret)

# GCM (Google Cloud Messaging) push endpoint used to forward tweets to a device.
# NOTE(review): plain http:// URL — confirm whether the endpoint supports https.
gcm_api_key = os.environ['GCM_API_KEY']
gcm_url = 'http://exp-warenix.rhcloud.com/gcm/sendMessage1to1'
while True:
    try:
        #r = api.request('statuses/filter', {'track': 'hong kong', 'location': '22.25,114.1667'})
        r = api.request('user')
        iterator = r.get_iterator()
        for item in iterator:
            if 'text' in item:
                text = item['text'].encode('utf-8', 'ignore')
                name = item['user']['name'].encode('utf-8', 'ignore')
                screen_name = item['user']['screen_name'].encode('utf-8', 'ignore')
                id_str = item['id_str']
                profile_image_url = item['user']['profile_image_url']

                print  "%s(@%s)\t\t: %s" % (name, screen_name, text)
                print
                data  = {
                        'name': name,
                 access_token_secret)
    page = 1
    nb_results = 10
    # r = api2.request('users/search', {'q':query, 'count':nb_results }).json()
    r = api2.request('users/search', {'q':query, 'page':page, 'count':nb_results }).json()
    
    user_list = []
    for item in r:
        user_list.append(item['screen_name'])

    print user_list
    return user_list


# Authentication — application-only (oAuth2) bearer access, sufficient for
# the read-only endpoints used below.
# NOTE(review): credentials are hard-coded in source; move to config/env.
api = TwitterAPI('BQBZTbY3ugTypaRBq7Is0m6Dh', 'JGeRqs3r42Id4W2Q47NlGwAlNYv0myrBhlUPJeeizQXi56RWBm', auth_type='oAuth2')
print("authentification success\n")

name_query = "trump" #THIS WILL BE NOT HARDCODED AND WILL BE TAKEN FROM THE FRONT END
count = 10

# requests to twitter, one for one's tweet the other for one's personal info
r = api.request('statuses/user_timeline', {'screen_name': name_query, 'count':count, 'exclude_replies':'true', 'include_rts':'false'})
personalInfoResponse = api.request('users/show', {'screen_name' : name_query, 'include_entities' : 'false'}).json()

# If Twitter reported an error, print the message; error code 50 means the
# exact screen name does not exist, so fall back to a fuzzy user search.
# NOTE(review): the bare `print error[...]` below is Python 2 syntax — this
# snippet is py2 and will not parse under py3.
if 'errors' in personalInfoResponse:
    error = personalInfoResponse['errors']
    print error[0]['message']
    if error[0]['code'] == 50:
        #user not found
        search_users(name_query)
Beispiel #36
0
def display(request):
    """Django view: render a Twitter dashboard for a queried screen name.

    Fetches the user's recent timeline, tweets mentioning them, and their
    profile info from the Twitter REST API, JSON-encodes the derived
    payloads, and renders 'feels/display.html'. If the queried user does
    not exist, falls back to the default account ('kanyewest').

    :param request: Django HttpRequest; on POST, ``query_text`` holds the
        screen name to look up (a leading '@' is tolerated and stripped).
    :return: HttpResponse rendered from 'feels/display.html'.
    """
    # Default subject; overridden by the POST form's query.
    name_query = 'kanyewest'
    if request.method == "POST":
        name_query = request.POST['query_text'].replace('@', '')

    # Authentication — application-only (oAuth2) read access.
    # NOTE(review): app credentials are hard-coded; they belong in
    # settings/environment, not in source control.
    api = TwitterAPI('BQBZTbY3ugTypaRBq7Is0m6Dh',
                     'JGeRqs3r42Id4W2Q47NlGwAlNYv0myrBhlUPJeeizQXi56RWBm',
                     auth_type='oAuth2')

    count = 200

    def _fetch(screen_name):
        """Fetch (timeline, mentions, profile-json) for *screen_name*."""
        tweets = api.request(
            'statuses/user_timeline',
            {'screen_name': screen_name, 'count': count,
             'exclude_replies': 'true', 'include_rts': 'false'})
        mentions = api.request(
            'search/tweets',
            {'q': '@' + screen_name, 'count': count, 'lang': 'en'})
        profile = api.request(
            'users/show',
            {'screen_name': screen_name, 'include_entities': 'false'}).json()
        return tweets, mentions, profile

    tweets, otherTweets, personalInfoResponse = _fetch(name_query)

    # User not found (or other API error): fall back to the default account.
    # BUG FIX: the original error branch refetched the timeline and profile
    # for the fallback account but kept the mention results from the failed
    # query; refetching everything keeps all payloads consistent with the
    # account actually displayed.
    if 'errors' in personalInfoResponse:
        name_query = 'kanyewest'
        tweets, otherTweets, personalInfoResponse = _fetch(name_query)

    # Both branches of the original built the identical context — dedupe.
    context = RequestContext(request, {'query': name_query})
    context['bio_data'] = json.dumps(build_bio_dict(personalInfoResponse))
    context['tweet_data'] = json.dumps(build_tweets_dict(tweets))
    context['top_hashtags'] = json.dumps(build_hashtag_freq(tweets))
    context['other_tweets'] = json.dumps(build_other_tweets(otherTweets))

    return render_to_response('feels/display.html', context)
Beispiel #37
0
    languages.append(PORTUGUESE)
    languages.append(SPANISH)
    return languages


# Language filter operators for the twitter search query, URL-encoded
# ('lang%3Afr' decodes to the 'lang:fr' search operator).
FRENCH = 'lang%3Afr'
ENGLISH = 'lang%3Aen'
PORTUGUESE = 'lang%3Apt'
SPANISH = 'lang%3Aes'
CATALAN = 'lang%3Acat'

# Twitter API credentials.
# BUG FIX: the original assignments ended with trailing commas, which made
# three of these "strings" one-element tuples and would have been passed as
# tuples into performTwitterSearch below.
# NOTE(review): hard-coded secrets — move to environment/config.
consumer_key = 'rW3r87OsW7V5WcL8uNc9m5kmU'
consumer_secret = 'zsjxaiYf23J5IsJjhZxodrVDVJkBUgWAVfEyBR2bfBwz5GZngV'
access_token_key = '169260565-tlC8ZpYAD4eyyqPaPpzwYD3UPrxgBV9VQ0zxuu3D'
access_token_secret = 'mKOYFGuklZfo4X5m67pW3LZrB8rnXJlhsKCPasidzSwKt'

# Languages to collect (EN, ES, PT, FR per createLanguagesList).
languages = createLanguagesList()

# This method creates a database in Parse.com and performs a search in
# twitter for the selected languages.
TwitterAPI.performTwitterSearch(consumer_key, consumer_secret,
                                access_token_key, access_token_secret,
                                languages)

# After creating the online database, retrieve the db items and create the
# local dataset files, one per language.
localTweets.writeFile("datasets/eng_tweets_x.txt", 'en')
localTweets.writeFile("datasets/es_tweets_x.txt", 'es')
localTweets.writeFile("datasets/pt_tweets_x.txt", 'pt')
localTweets.writeFile("datasets/fr_tweets_x.txt", 'fr')