def Tweets():
    """Collect recent English-language tweets for every company in MainDF.

    For each row of MainDF, runs a hashtag search (TwitterSearchOrder) and a
    user-timeline fetch (TwitterUserOrder), keeping at most ``max_feeds``
    timeline tweets per company.  Each kept tweet is stored as
    [Company, Source/User, Title/Tweet, Date, Link]; per-company results go
    into the module-level ``tweets_datasets`` dict as a DataFrame (with a
    'Category' = 'Twitter' column prepended), and the company name is appended
    to ``tw_current_companies``.

    NOTE(review): this file defines Tweets() twice; this first definition is
    shadowed by the later one and is effectively dead code — confirm which one
    is intended and delete the other.

    Errors from the TwitterSearch library are caught and printed.
    """
    try:
        max_feeds = 10  # cap on timeline tweets kept per company

        # Hashtag search: English only, full (non-truncated) tweet text.
        tso = TwitterSearchOrder()
        tso.set_language('en')
        tso.set_include_entities(False)  # skip entity metadata
        tso.set_until(new_date)
        tso.arguments.update({'tweet_mode': 'extended'})
        tso.arguments.update({'truncated': 'False'})

        # SECURITY: credentials must come from configuration/environment,
        # never be hard-coded.  Empty strings here will fail authentication —
        # supply real values via os.environ or a secrets manager.
        ts = TwitterSearch(consumer_key='',
                           consumer_secret='',
                           access_token='',
                           access_token_secret='',
                           proxy='http://proxy_address')

        for c in range(len(MainDF)):
            company = MainDF['company'][c]
            count = 0
            tso.set_keywords(MainDF['hashtag'][c])
            tweets_list = []

            # User-timeline query for the company's own account.
            tuo = TwitterUserOrder(MainDF['twitter'][c])
            tuo.set_include_entities(False)  # skip entity metadata
            tuo.arguments.update({'tweet_mode': 'extended'})
            tuo.arguments.update({'truncated': 'False'})

            # Hashtag results: skip retweets, keep originals plus any links
            # extracted from the text.
            for tweet in ts.search_tweets_iterable(tso):
                if 'retweeted_status' in tweet:
                    continue  # ignore retweets
                links = ', '.join(Find(tweet['full_text']))
                tweets_list.append([
                    company,
                    tweet['user']['screen_name'],
                    tweet['full_text'],
                    tweet['created_at'],
                    links,
                ])

            # Timeline results: English only, capped at max_feeds.
            for tweet in ts.search_tweets_iterable(tuo):
                if tweet['lang'] != 'en':
                    continue
                links = ', '.join(Find(tweet['full_text']))
                tweets_list.append([
                    company,
                    tweet['user']['screen_name'],
                    tweet['full_text'],
                    tweet['created_at'],
                    links,
                ])
                count += 1
                if count >= max_feeds:  # >= is safer than == against skips
                    break

            if tweets_list:
                df = pd.DataFrame(
                    tweets_list,
                    columns=['Company', 'Source/User', 'Title/Tweet',
                             'Date', 'Link'])
                df.insert(0, 'Category', 'Twitter')
                # Whole-column assignment replaces the original per-cell
                # chained writes (df['Date'][i] = ...), which trigger pandas
                # SettingWithCopy warnings and can silently fail.
                df['Date'] = [parse(d).date() for d in df['Date']]
                tweets_datasets[company] = df
                tw_current_companies.append(company)
    except TwitterSearchException as e:
        # Surface API/auth errors without crashing the caller.
        print(e)
def Tweets():
    """Collect recent English-language tweets for every company in MainDF.

    For each row of MainDF, runs a hashtag search (TwitterSearchOrder) and a
    user-timeline fetch (TwitterUserOrder), keeping at most ``max_feeds``
    timeline tweets per company.  Each kept tweet is stored as
    [Company, Source/User, Title/Tweet, Date, Link]; per-company results go
    into the module-level ``tweets_datasets`` dict as a DataFrame (with a
    'Category' = 'Twitter' column prepended), and the company name is appended
    to ``tw_current_companies``.

    NOTE(review): this file defines Tweets() twice; this second definition
    shadows the earlier one — confirm which is intended and delete the other.

    Errors from the TwitterSearch library are caught and printed.
    """
    import os  # local import so this fix is self-contained

    try:
        max_feeds = 10  # cap on timeline tweets kept per company

        # Hashtag search: English only, full (non-truncated) tweet text.
        tso = TwitterSearchOrder()
        tso.set_language('en')
        tso.set_include_entities(False)  # skip entity metadata
        tso.set_until(new_date)
        tso.arguments.update({'tweet_mode': 'extended'})
        tso.arguments.update({'truncated': 'False'})

        # SECURITY: the original code hard-coded live consumer/access keys in
        # source control.  Those credentials are compromised and MUST be
        # revoked/rotated in the Twitter developer portal.  Read them from the
        # environment instead of embedding them here.
        ts = TwitterSearch(
            consumer_key=os.environ.get('TWITTER_CONSUMER_KEY', ''),
            consumer_secret=os.environ.get('TWITTER_CONSUMER_SECRET', ''),
            access_token=os.environ.get('TWITTER_ACCESS_TOKEN', ''),
            access_token_secret=os.environ.get('TWITTER_ACCESS_TOKEN_SECRET', ''),
        )

        for c in range(len(MainDF)):
            company = MainDF['company'][c]
            count = 0
            tso.set_keywords(MainDF['hashtag'][c])
            tweets_list = []

            # User-timeline query for the company's own account.
            tuo = TwitterUserOrder(MainDF['twitter'][c])
            tuo.set_include_entities(False)  # skip entity metadata
            tuo.arguments.update({'tweet_mode': 'extended'})
            tuo.arguments.update({'truncated': 'False'})

            # Hashtag results: skip retweets, keep originals plus any links
            # extracted from the text.
            for tweet in ts.search_tweets_iterable(tso):
                if 'retweeted_status' in tweet:
                    continue  # ignore retweets
                links = ', '.join(Find(tweet['full_text']))
                tweets_list.append([
                    company,
                    tweet['user']['screen_name'],
                    tweet['full_text'],
                    tweet['created_at'],
                    links,
                ])

            # Timeline results: English only, capped at max_feeds.
            for tweet in ts.search_tweets_iterable(tuo):
                if tweet['lang'] != 'en':
                    continue
                links = ', '.join(Find(tweet['full_text']))
                tweets_list.append([
                    company,
                    tweet['user']['screen_name'],
                    tweet['full_text'],
                    tweet['created_at'],
                    links,
                ])
                count += 1
                if count >= max_feeds:  # >= is safer than == against skips
                    break

            if tweets_list:
                df = pd.DataFrame(
                    tweets_list,
                    columns=['Company', 'Source/User', 'Title/Tweet',
                             'Date', 'Link'])
                df.insert(0, 'Category', 'Twitter')
                # Whole-column assignment replaces the original per-cell
                # chained writes (df['Date'][i] = ...), which trigger pandas
                # SettingWithCopy warnings and can silently fail.
                df['Date'] = [parse(d).date() for d in df['Date']]
                tweets_datasets[company] = df
                tw_current_companies.append(company)
    except TwitterSearchException as e:
        # Surface API/auth errors without crashing the caller.
        print(e)