def poll(self):
    """Search Twitter for recent tweets matching the configured query and
    dispatch one trigger per tweet, oldest first."""
    order = TwitterSearchOrder()
    order.set_keywords(self._config["query"])

    lang = self._config.get("language", None)
    if lang:
        order.set_language(lang)

    order.set_result_type("recent")
    order.set_count(self._config.get("count", 30))
    order.set_include_entities(False)

    # Resume from the last tweet we already processed, if any.
    last_id = self._get_last_id()
    if last_id:
        order.set_since_id(int(last_id))

    try:
        response = self._client.search_tweets(order)
        statuses = response["content"]["statuses"]
    except Exception as e:
        self._logger.exception("Polling Twitter failed: %s" % (str(e)))
        return

    # API returns newest first; flip so triggers fire in chronological order.
    statuses = statuses[::-1]

    if statuses:
        self._set_last_id(last_id=statuses[-1]["id"])

    for status in statuses:
        self._dispatch_trigger_for_tweet(tweet=status)
Beispiel #2
0
def coleta_tweets():
    """Collect Portuguese 'Harry potter' tweets and return them as a
    one-column pandas DataFrame (None if the search setup fails)."""
    try:
        ts = TwitterSearch(
            consumer_key='',
            consumer_secret='',
            access_token='',
            access_token_secret='',
        )

        tso = TwitterSearchOrder()
        tso.set_keywords(['Harry potter'])
        tso.set_language('pt')

        # One formatted line per tweet, comma-terminated (matches legacy format).
        rows = [
            '@%s tweeted: %s' % (tweet['user']['screen_name'], tweet['text']) + ','
            for tweet in ts.search_tweets_iterable(tso)
        ]
        print('Coleta finalizada!')

        return pd.DataFrame(rows)
    except TwitterSearchException as e:
        print(e)
Beispiel #3
0
def cmd(send, msg, args):
    """
    Search the Twitter API.
    Syntax: {command} <query> <--user username> <--count 1>
    """
    if not msg:
        send('What do you think I am, a bird?')
        return

    # --user / --count / --random are mutually exclusive options.
    parser = arguments.ArgParser(args['config'])
    parser.add_argument('query', nargs='*')
    group = parser.add_mutually_exclusive_group()
    group.add_argument('--user', dest='user', default=None)
    group.add_argument('--count', dest='count', type=int, default=1)
    group.add_argument('--random', action='store_true', default=False)

    try:
        cmdargs = parser.parse_args(msg)
    except arguments.ArgumentException as e:
        send(str(e))
        return

    api = get_search_api(args['config'])

    search = TwitterSearchOrder()
    terms = [' '.join(cmdargs.query)]
    if cmdargs.user:
        terms.append(f'from:{cmdargs.user}')
    search.set_keywords(terms)
    search.set_language('en')
    search.set_result_type('recent')
    search.set_include_entities(False)
    search.set_count(cmdargs.count)

    results = list(api.search_tweets_iterable(search))
    if not results:
        send('No tweets here!')
        return

    if cmdargs.random:
        shuffle(results)

    max_chan_tweets = 5
    max_pm_tweets = 25
    if cmdargs.count > max_pm_tweets:
        send(
            f"That's too many tweets! The maximum allowed through PM is {max_pm_tweets}"
        )
        return

    if cmdargs.count > max_chan_tweets:
        send(
            f"That's a lot of tweets! The maximum allowed in a channel is {max_chan_tweets}"
        )

    # More than max_chan_tweets goes via PM to the requesting nick.
    in_channel = cmdargs.count <= max_chan_tweets
    for idx in range(min(cmdargs.count, max_pm_tweets)):
        text = tweet_text(results[idx])
        if in_channel:
            send(text)
        else:
            send(text, target=args['nick'])
Beispiel #4
0
    def post(self):
        """Handle POST: tag tweets matching the query with indico and write
        per-category average scores plus the winning category as JSON."""
        query_string = self.request.body_arguments.get('query')
        search = TwitterSearchOrder()
        search.set_keywords(query_string)
        search.set_language('en')
        search.set_include_entities(False)
        response = TwitterClient.search_tweets(search)

        texts = [status['text'] for status in response['content']['statuses']]
        tag_results = indicoio.batch_text_tags(texts)
        total = float(len(tag_results))

        # Average each category's score over all tagged tweets.
        scores = defaultdict(float)
        for result in tag_results:
            for category, score in result.items():
                scores[category] += score / total

        best = max(scores, key=scores.get)

        self.write(json.dumps({'scores': scores, 'category': best}))
Beispiel #5
0
def rq_tweets(game):
    """
    Request tweet metadata according to a game using the Twitter API
    """
    order = TwitterSearchOrder()
    order.set_keywords(game.c_name.split())
    order.set_language('en')

    try:
        for raw in API.search_tweets_iterable(order):
            # Drop tweets that fail per-tweet validation or game relevancy.
            if not validate_tweet(raw) or not relevant_tweet(game, raw):
                continue

            # Keep only the fields we cache.
            slim = {
                'id': raw['id'],
                'text': raw['text'],
                'user': {
                    'name': raw['user']['name']
                },
                'created_at': raw['created_at']
            }

            TC['Tweet.game_id'].add(
                CachedTweet(game_id=game.game_id, twitter_data=slim))
    except TwitterSearchException:
        # On API failure, persist pending cached tweets and rebuild the handle.
        TC['Tweet.game_id'].flush()
        reload_api()
Beispiel #6
0
    def poll(self):
        """Fetch recent tweets for the configured query and dispatch one
        trigger per tweet in chronological order."""
        tso = TwitterSearchOrder()
        tso.set_keywords(self._config['query'])

        lang = self._config.get('language', None)
        if lang:
            tso.set_language(lang)

        tso.set_result_type('recent')
        tso.set_count(self._config.get('count', 30))
        tso.set_include_entities(False)

        # Continue from the last dispatched tweet, if recorded.
        since = self._get_last_id()
        if since:
            tso.set_since_id(int(since))

        try:
            statuses = self._client.search_tweets(tso)['content']['statuses']
        except Exception as e:
            self._logger.exception('Polling Twitter failed: %s' % (str(e)))
            return

        ordered = list(reversed(statuses))

        if ordered:
            self._set_last_id(last_id=ordered[-1]['id'])

        for item in ordered:
            self._dispatch_trigger_for_tweet(tweet=item)
    def init_tw_search_lib(self, domain_keyword):
        """
        Init TwitterSearch Library
        (Copyright (C) 2013 Christian Koepp
        https://github.com/ckoepp/TwitterSearch/tree/master)


        Arguments:
            domain_keyword {str} -- The keyword from <domain_keywords_dict>
                                    that will be used to search in Twitter

        Returns:
            [TwitterSearch] -- TwitterSearch object with our secret tokens,
                               or None if initialization failed
            [TwitterSearchOrder] -- TwitterSearchOrder object with initialized
                                    attributes, or None if initialization failed
        """
        # BUGFIX: pre-bind both names so the return statement cannot raise
        # UnboundLocalError when TwitterSearchException is caught below
        # (previously ts/tso were only bound inside the try block).
        ts = None
        tso = None

        try:
            tso = TwitterSearchOrder()  # create a TwitterSearchOrder object
            tso.add_keyword(
                domain_keyword)  # add keyword for search in Twitter
            tso.set_language('en')  # we want to see English tweets only
            tso.set_include_entities(
                False)  # and don't give us all those entity information

            # it's about time to create a TwitterSearch object with our secret tokens
            ts = TwitterSearch(
                consumer_key='<your-CONSUMER_KEY>',
                consumer_secret='<your-CONSUMER_SECRET>',
                access_token='<your-ACCESS_TOKEN>',
                access_token_secret='<your-ACCESS_TOKEN_SECRET>')

        except TwitterSearchException as e:  # take care of all those ugly errors if there are some
            print(e)

        return ts, tso
    def poll(self):
        """Poll Twitter for new tweets matching the configured query and
        dispatch a trigger for each one, oldest first."""
        order = TwitterSearchOrder()
        # Second positional argument True is TwitterSearch's or_operator flag
        # (keywords joined with OR instead of AND).
        order.set_keywords(self._config['query'], True)

        language = self._config.get('language', None)
        if language:
            order.set_language(language)

        order.set_result_type('recent')
        order.set_count(self._config.get('count', 30))
        order.set_include_entities(False)

        last_seen = self._get_last_id()
        if last_seen:
            order.set_since_id(int(last_seen))

        try:
            payload = self._client.search_tweets(order)
            found = payload['content']['statuses']
        except Exception as e:
            self._logger.exception('Polling Twitter failed: %s' % (str(e)))
            return

        # Newest-first from the API; reverse into chronological order.
        found = list(reversed(found))

        if found:
            self._set_last_id(last_id=found[-1]['id'])

        for status in found:
            self._dispatch_trigger_for_tweet(tweet=status)
Beispiel #9
0
def coleta_tweets():
    """Collect Portuguese 'Harry potter' tweets into a pandas DataFrame.

    Returns the DataFrame, or None when the Twitter search setup fails.
    """
    try:
        ts = TwitterSearch(consumer_key='',
                           consumer_secret='',
                           access_token='',
                           access_token_secret='')

        tso = TwitterSearchOrder()
        tso.set_keywords(['Harry potter'])
        tso.set_language('pt')

        linhas = []
        for tweet in ts.search_tweets_iterable(tso):
            # Legacy line format, comma-terminated.
            linha = '@%s tweeted: %s' % (tweet['user']['screen_name'],
                                         tweet['text'])
            linhas.append(linha + ',')
        print('Coleta finalizada!')

        return pd.DataFrame(linhas)
    except TwitterSearchException as e:
        print(e)
def SearchOnTwitter(keywords, language):
    """
    Allows to test twitter search library -> Print tweets of interest.
        Parameters:
            - keywords : string array that tweets must contain
            - language : string indicating the language of the interest tweets
        Return :
            - array of tweets
    """
    collected = []
    try:
        order = TwitterSearchOrder()
        # Words every matching tweet must contain.
        order.set_keywords(keywords)
        # Restrict results to the requested language.
        order.set_language(language)
        # No entity metadata needed, only the raw text.
        order.set_include_entities(False)

        # Build the search client from our secret tokens.
        client = TwitterSearch(consumer_key=consumer_key,
                               consumer_secret=consumer_secret,
                               access_token=access_token,
                               access_token_secret=access_token_secret)

        for hit in client.search_tweets_iterable(order):
            collected.append(hit['text'])

    except TwitterSearchException as e:
        # Report API/setup errors; an empty list is returned in that case.
        print(e)

    return collected
Beispiel #11
0
def cmd(send, msg, args):
    """
    Search the Twitter API.
    Syntax: {command} <query> <--user username> <--count 1>
    """
    if not msg:
        send('What do you think I am, a bird?')
        return

    # --user / --count / --random are mutually exclusive.
    parser = arguments.ArgParser(args['config'])
    parser.add_argument('query', nargs='*')
    group = parser.add_mutually_exclusive_group()
    group.add_argument('--user', dest='user', default=None)
    group.add_argument('--count', dest='count', type=int, default=1)
    group.add_argument('--random', action='store_true', default=False)

    try:
        cmdargs = parser.parse_args(msg)
    except arguments.ArgumentException as e:
        send(str(e))
        return

    api = get_search_api(args['config'])

    search = TwitterSearchOrder()
    terms = [' '.join(cmdargs.query)]
    if cmdargs.user:
        terms.append('from:{}'.format(cmdargs.user))
    search.set_keywords(terms)
    search.set_language('en')
    search.set_result_type('recent')
    search.set_include_entities(False)
    search.set_count(cmdargs.count)

    results = list(api.search_tweets_iterable(search))
    if not results:
        send('No tweets here!')
        return

    if cmdargs.random:
        shuffle(results)

    max_chan_tweets = 5
    max_pm_tweets = 25
    if cmdargs.count > max_pm_tweets:
        send("That's too many tweets! The maximum allowed through PM is {}".format(max_pm_tweets))
        return

    if cmdargs.count > max_chan_tweets:
        send("That's a lot of tweets! The maximum allowed in a channel is {}".format(max_chan_tweets))

    # Beyond max_chan_tweets, deliver via PM to the requesting nick.
    in_channel = cmdargs.count <= max_chan_tweets
    for idx in range(min(cmdargs.count, max_pm_tweets)):
        text = tweet_text(results[idx])
        if in_channel:
            send(text)
        else:
            send(text, target=args['nick'])
Beispiel #12
0
def getTweets(politician_id, searchOnlySexistWords):
    """Fetch tweets mentioning a politician.

    When searchOnlySexistWords is truthy, each configured sexist word is
    paired with each politician name variant and the full iterable of
    matches is returned; when it is exactly False, plain name matches are
    fetched (single page, up to 100 statuses).
    """
    try:
        politician = Politician.objects.get(id=politician_id)
        politician_names = [
            politician.first_name + " " + politician.last_name,
            politician.username
        ]

        tso = TwitterSearchOrder()
        searchTerms = []

        if searchOnlySexistWords:
            # Cross-product: every sexist word with every name variant.
            for word in CONFIG['SEXISTWORDS']:
                for politician_name in politician_names:
                    searchTerms.append(word + ' ' + politician_name)
        elif searchOnlySexistWords is False:
            searchTerms = politician_names

        tso.set_keywords(searchTerms, or_operator=True)
        tso.set_language("en")
        tso.set_include_entities(False)
        # Append tweet_mode=extended so untruncated tweet text is returned.
        querystr = tso.create_search_url()
        tso.set_search_url(querystr + "&tweet_mode=extended")

        ts = TwitterSearch(consumer_key=CONFIG['CONSUMER_KEY'],
                           consumer_secret=CONFIG['CONSUMER_SECRET'],
                           access_token=CONFIG['ACCESS_TOKEN'],
                           access_token_secret=CONFIG['ACCESS_TOKEN_SECRET'])

        print("**Processing tweets for " +
              str(politician.first_name + " " + politician.last_name) + "**")
        if searchOnlySexistWords:
            return ts.search_tweets_iterable(tso)
        # will limit to 100 if not only searching sexist words
        return ts.search_tweets(tso)['content']['statuses']

    except TwitterSearchException as e:
        logging.exception("Unable to get new tweets because of" + str(e))
Beispiel #13
0
def gettwitter(query):
    """Scrape the Twitter web search page for *query* and return a list of
    cleaned tweet texts (markup stripped, URLs replaced with 'url').

    Returns an empty list if building the search URL fails.
    """
    try:
        tso = TwitterSearchOrder()
        tso.set_language('en')
        tso.set_locale('en')
        tso.set_keywords([query])
        url = "https://twitter.com/search" + tso.create_search_url()
        # BUGFIX: was a Python 2 `print url` statement; use the function form.
        print(url)
    except TwitterSearchException as e:
        print(e)
        # BUGFIX: previously fell through and raised NameError on the unbound
        # `url`; bail out with an empty result instead.
        return []
    html = getHtml(url)
    soup = BeautifulSoup(html)
    twits = soup.find_all("p", class_="TweetTextSize")
    twitters = []
    # Hoisted out of the loop: the tag-stripping pattern never changes.
    tag_re = re.compile(r'<[^>]+>', re.S)
    for t in twits:
        text = tag_re.sub('', str(t))
        # Replace links with the literal token "url".
        text = re.sub(r"([a-zA-z]+://\S*\s{0,1})", "url", text)
        twitters.append(text + "\n")
    return twitters
Beispiel #14
0
    def post(self):
        """Handle POST: classify tweets for the query via indico text tags and
        write averaged category scores plus the top category as JSON."""
        query_string = self.request.body_arguments.get('query')
        tso = TwitterSearchOrder()
        tso.set_keywords(query_string)
        tso.set_language('en')
        tso.set_include_entities(False)
        statuses = TwitterClient.search_tweets(tso)['content']['statuses']

        texts = [status['text'] for status in statuses]
        tagged = indicoio.batch_text_tags(texts)
        n = float(len(tagged))

        # Accumulate the per-tweet contribution to each category's average.
        scores = defaultdict(float)
        for tags in tagged:
            for name, value in tags.items():
                scores[name] += value / n

        winner = max(scores, key=lambda c: scores[c])

        self.write(json.dumps({'scores': scores, 'category': winner}))
Beispiel #15
0
    def post(self):
        """Handle POST: score tweet sentiment for the query and write the five
        most positive / most negative tweets and the mean score as JSON."""
        query_string = self.request.body_arguments.get('query')
        order = TwitterSearchOrder()
        order.set_keywords(query_string)
        order.set_language('en')
        order.set_include_entities(False)
        response = TwitterClient.search_tweets(order)

        texts = [status['text'] for status in response['content']['statuses']]
        sentiment = indicoio.batch_sentiment(texts)
        # Sorting (score, text) pairs ranks tweets from most negative up.
        ranked = sorted(zip(sentiment, texts))
        total = float(len(ranked))

        top_n = 5
        data = {
            'most_positive': list(reversed(ranked[-top_n:])),
            'most_negative': ranked[:top_n],
            'average': sum(sentiment) / total
        }

        self.write(json.dumps(data))
Beispiel #16
0
    def post(self):
        """Handle POST: run sentiment analysis over tweets matching the query;
        respond with the sentiment extremes and the average as JSON."""
        query_string = self.request.body_arguments.get('query')
        tso = TwitterSearchOrder()
        tso.set_keywords(query_string)
        tso.set_language('en')
        tso.set_include_entities(False)
        statuses = TwitterClient.search_tweets(tso)['content']['statuses']

        texts = [s['text'] for s in statuses]
        sentiment = indicoio.batch_sentiment(texts)
        # Ascending sort of (score, text): head = negative, tail = positive.
        pairs = sorted(zip(sentiment, texts))
        n = float(len(pairs))

        top_n = 5
        self.write(json.dumps({
            'most_positive': list(reversed(pairs[-top_n:])),
            'most_negative': pairs[:top_n],
            'average': sum(sentiment) / n
        }))
Beispiel #17
0
def getTweets(politician_id):
    """Search Twitter for hard-coded sexist words paired with each of a
    politician's name variants; return the resulting tweet iterable
    (None on TwitterSearchException)."""
    try:
        politician = Politician.objects.get(id=politician_id)

        politician_names = [politician.first_name + " " + politician.last_name, politician.last_name, politician.username]
        print("Getting Tweets for " + str(politician.first_name + " " + politician.last_name))

        tso = TwitterSearchOrder()
        sexistWords = ['bitch', 'skank', 'rape']
        # Cross-product of words and name variants, OR-joined below.
        searchTerms = [word + ' ' + name
                       for word in sexistWords
                       for name in politician_names]

        tso.set_keywords(searchTerms, or_operator=True)
        print(searchTerms)
        tso.set_language("en")
        tso.set_include_entities(False)
        # Append tweet_mode=extended so full tweet text is returned.
        querystr = tso.create_search_url()
        tso.set_search_url(querystr + "&tweet_mode=extended")

        # Environment variables take precedence over the config file.
        ts = TwitterSearch(
            consumer_key=os.environ.get('CONSUMER_KEY', CONFIG['CONSUMER_KEY']),
            consumer_secret=os.environ.get('CONSUMER_SECRET', CONFIG['CONSUMER_SECRET']),
            access_token=os.environ.get('ACCESS_TOKEN', CONFIG['ACCESS_TOKEN']),
            access_token_secret=os.environ.get('ACCESS_TOKEN_SECRET', CONFIG['ACCESS_TOKEN_SECRET'])
        )

        return ts.search_tweets_iterable(tso)

    except TwitterSearchException as e:
        logging.exception("Unable to get new tweets because of" + str(e))

# if __name__ == "__main__":
#     getTweets()
Beispiel #18
0
    def search(self, query, lang='en', n=10**5):
        """
        Search twitter for specified query.
        Function returns n tweets or as many as can be found for that query.

        Parameters:
        query -- Search query (String)
        lang -- Specify language of tweets, optional, default: 'en' (String)
        n -- Number of tweets to return, optional, default: 10**5 (Int)

        Returns:
        tweets_out -- Pandas series of tweets of length n
        """
        # Initialise container
        tweets_out = []
        # Setup twitter search
        tso = TwitterSearchOrder()
        tso.set_keywords([query])
        tso.set_language(lang)
        tso.set_include_entities(False)

        # Begin search
        sys.stdout.write("Tweet number out of {0}: ".format(n))
        for i, tweet in enumerate(self.twitter.search_tweets_iterable(tso)):
            # Break from loop when n tweets are reached
            if i == n:
                break
            # Output progress every 100 tweets
            if i % 100 == 0:
                sys.stdout.write('{0} '.format(i))
                sys.stdout.flush()
            # Add the next tweet to the container
            tweets_out.append('%s' % (tweet['text']))
        # BUGFIX: a bare `print` is a no-op expression in Python 3; call it to
        # actually terminate the progress line with a newline.
        print()
        # Return as pandas series as it's easier to work with
        return pd.Series(tweets_out)
Beispiel #19
0
# NOTE(review): this script fragment appears truncated — the "while True" loop
# below has no visible fetch/insert step or break in this chunk; confirm
# against the full original script before relying on it.
# Open the CouchDB database that stores imported tweets.
database = couchdb.Server()[COUCH_DATABASE_NAME]

# Setup a twitter connection and configure its credentials:
twitter_connection = TwitterSearch(**TWITTER_CREDENTIALS)

# The twitter client may stop iterating the tweets at some point.
# In order to automatically continue at the last position, we put the
# import in a "while"-loop which will be stopped when there are no new
# tweets to import.
while True:
    # First, let's build a search query:
    twitter_query = TwitterSearchOrder()
    twitter_query.set_keywords(TWITTER_SEARCH_KEYWORDS)
    # Only import english tweets as our sentiment analysis will only work
    # with the English language for now.
    twitter_query.set_language('en')
    # We do not require entities (e.g. extracted URLs) as we are only
    # interested in the raw text of the tweet.
    twitter_query.set_include_entities(False)

    # Document IDs are tweet IDs; CouchDB-internal ids start with '_'.
    document_ids = tuple(filter(lambda id_: not id_.startswith('_'), database))
    if len(document_ids) > 0:
        # If we already have imported tweets, we should continue with the oldest
        # tweet we know and work our way to older tweets from there.
        # We do that by setting the max_id query parameter to the oldest tweet
        # we know.
        oldest_id = min(document_ids)
        twitter_query.set_max_id(int(oldest_id))
        print('Continuing initial import from tweet {}'.format(oldest_id))
    else:
        print('Starting initial import on fresh database.')
def collect_tweets(keyword, count, force=False):    
    from TwitterSearch import TwitterSearch
    from TwitterSearch import TwitterSearchOrder
    import pymongo
    from dateutil.parser import parse
    from alchemyapi import AlchemyAPI
    import ConfigParser
    
    # try:
    #     keyword = sys.argv[1]
    #     count = int(sys.argv[2])
    # except IndexError:
    # 	e_too_few_args = "You did not enter enough arguments. Two are required: keyword, and count"
    # 	raise Exception(e_too_few_args)
    # try:
    #     if sys.argv[3] == '-f':
    #         force = True
    #     else:
    #         e_invalid_argument = "The only option available is -f. It is used to force the script to continue when the Alchemy API limit is exceeded."
    #         raise Exception(e_invalid_argument)    
    # except IndexError:
    #     force = False
    
    # Read the config file for config variables
    config = ConfigParser.RawConfigParser()
    config.read('config.cfg')
    mongo_url = config.get('Mongo', 'db_url')
    
    # Connect to the Mongo database using MongoClient
    
    client = pymongo.MongoClient(mongo_url)
    db = client.get_default_database()
    # Access/create the collection based on the command line argument
    tweets = db[keyword]
    
    #Generate the alchemyapi variable
    alchemyapi = AlchemyAPI()
    
    # To accommodate for hashtags the user can substitute a . for the # in the command line. Lines 30 & 31 return it to a hashtag for the search.
    if keyword[0] is ".":
        keyword = keyword.replace('.', '#')
    
    # Lines 33-42 ensure that the query is not doing duplicate work.
    # First, it counts to see how many documents exist in the collection
    db_count = tweets.count()
    
    # If there are documents in the collection, the collection is queried, tweet objects are sorted by date, and the tweet_id of the most recent tweet is retrieved and later set as the "since_id"
    if db_count is not 0:
        latest_id = tweets.find( {}, { 'object.tweet_id':1 } ).sort("startedAtTime").limit(1)
        latest_id_str = latest_id[db_count-1]['object']['tweet_id']
        latest_id_int = int(latest_id_str)
        print 'Count of documents in the ' + keyword + ' collection is not 0. It is ' + str(db_count) + '. Mongo is now identifying the latest tweet ID to append as a parameter to the API call.'
    # If ther are no documents in the collection, no queries are done, and the since_id is left out of the API call.    
    else:
        print 'The Mongo collection ' + keyword + ' is empty. The script will now collect all tweets.'
        
    # create a TwitterSearchOrder object
    tso = TwitterSearchOrder() 
    
    # let's define all words we would like to have a look for
    tso.set_keywords([keyword])
    
    # Select language
    tso.set_language('en') 
    
    # Include Entity information
    tso.set_include_entities(True)
    
    if db_count is not 0:
        tso.set_since_id(latest_id_int)
        print 'Since the document count in the ' + keyword + ' collection is above 0, the since_id uses the parameter of the latest tweet so that only new tweets are collected.'
    else:
    	print 'No documents exist in the ' + keyword + ' collection right now so the since_id parameter will be empty and all tweets will be collected.'
    
        
    # Create a TwitterSearch object with our secret tokens
    ts = TwitterSearch(
        consumer_key = config.get('Twitter', 'consumer_key'),
        consumer_secret = config.get('Twitter', 'consumer_secret'),
        access_token = config.get('Twitter', 'access_token'),
        access_token_secret = config.get('Twitter', 'access_token_secret')
     )
     
    # Perform the search
    twitter_search = ts.search_tweets_iterable(tso)

    # Start the insert count variable
    db_inserts = 0
    
    # this is where the fun actually starts :)
    try:
        for tweet in twitter_search:
            if db_inserts < count:
                mentions_list = []
                hashtags_list = []
                # Create the caliper_tweet object
                caliper_tweet = {
              "context": "http://purl.imsglobal.org/ctx/caliper/v1/MessagingEvent",
              "type": "MessagingEvent",
              "startedAtTime": "",
              ## Can be used to query Twitter API for user information
              "actor": "",
              "verb": "tweetSent",
              "object": {
                "type": "MessagingEvent",
                "tweet_id": "",
                "tweet_uri": "",
                "subtype": "tweet",
                ## "to" should be calculated by checking in_reply_to_user_id_str is null. If it's not null, then it should be concatenated to "uri:twitter/user/" and stored in "object"['to']
                "to": "",
                "author": {
                    "author_uri": "",
                    "author_alias": "",
                    "author_name": "",
                    },
                "text": "",
                "sentiment": {
                    "type": "",
                    "score": "",
                    "color": ""
                },
                "parent": "",
                ## "mentions" is an array of the caliper IDs from the user_mentions objects array
                "user_mentions": [],
                ## "hashtags" is an array of the hashtag texts included in the tweet entities
                "hashtags": []
              }
            }
                
                 # Set the re-usable variables
                tweet_text = tweet['text']
                
                ## AlchemyAPI Sentiment Analysis
                tweet_sentiment = ''
                response = alchemyapi.sentiment('text', tweet_text)
                if 'docSentiment' in response.keys():
                    if 'score' in response['docSentiment']:
                        tweet_sentiment_score = response['docSentiment']['score']
                        tweet_sentiment_score = float(tweet_sentiment_score)
                        tweet_sentiment_score = round(tweet_sentiment_score, 2)
                    else:
                        tweet_sentiment_score = 0
                    tweet_sentiment_type = response['docSentiment']['type']
                    tweet_sentiment_score_a = abs(tweet_sentiment_score)
                    if (tweet_sentiment_score) > 0:
                        tweet_sentiment_color = "rgba(0,255,0," + str(tweet_sentiment_score_a) + ")"
                    else: 
                        tweet_sentiment_color = "rgba(255,0,0," + str(tweet_sentiment_score_a) + ")"
                elif force == True:
                    print 'Force option set to true. The tweet_sentiment object will be set with API Limit Exceeded values.'
                    tweet_sentiment_type = 'API Limit Exceeded'
                    tweet_sentiment_score = 0
                    tweet_sentiment_color = 'rgba(0,0,0,0)'
                else:
                    e_alchemy_api_limit = 'Alchemy API daily limit exceeded. Retry search with force=True to continue'
                    raise Exception(e_alchemy_api_limit)
                    
            
                ds = tweet['created_at']
                tweet_date = parse(ds)
                caliper_tweet['startedAtTime'] = tweet_date
                caliper_tweet['actor'] = 'student:' + tweet['user']['screen_name']
                caliper_tweet['object']['tweet_uri'] = 'https://twitter.com/' + tweet['user']['screen_name'] + '/status/' + tweet['id_str']
                caliper_tweet['object']['tweet_id'] = tweet['id_str']
                if tweet['in_reply_to_user_id_str'] is None:
                    caliper_tweet['object']['to'] = 'NoReply'
                    caliper_tweet['object']['parent'] = 'NoReply'
                else:
                    caliper_tweet['object']['to'] = 'https://twitter.com/intent/user?user_id=' + tweet['in_reply_to_user_id_str']
                    if tweet['in_reply_to_status_id_str'] is None:
                        caliper_tweet['object']['parent'] = 'None'
                    else:    
                        caliper_tweet['object']['parent'] = 'https://twitter.com/' + tweet['user']['screen_name'] + '/status/' + tweet['in_reply_to_status_id_str']
                caliper_tweet['object']['author']['author_uri'] = 'https://twitter.com/intent/user?user_id=' + tweet['user']['id_str']
                caliper_tweet['object']['author']['author_alias'] = tweet['user']['screen_name']
                caliper_tweet['object']['author']['author_name'] = tweet['user']['name']
                caliper_tweet['object']['text'] = unicode(tweet['text'])
                caliper_tweet['object']['sentiment']['type'] = tweet_sentiment_type
                caliper_tweet['object']['sentiment']['score'] = tweet_sentiment_score
                caliper_tweet['object']['sentiment']['color'] = tweet_sentiment_color
                for x in list(tweet['entities']['hashtags']):
                    hashtag = x['text']
                    hashtags_list.append(hashtag)
                for x in list(tweet['entities']['user_mentions']):
                    mention = x['id_str']
                    mentions_list.append(mention)
                caliper_tweet['object']['user_mentions'] = mentions_list
                caliper_tweet['object']['hashtags'] = hashtags_list
             
                tweets.insert(caliper_tweet)
                
                db_inserts = db_inserts + 1
                
            else:
                raise StopIteration
    except StopIteration:
        print str(db_inserts) + " inserts made in the " + keyword + " collection."
Beispiel #21
0
def Tweets():
    """Collect tweets for every company in the module-level ``MainDF``.

    For each company, two queries are run against the Twitter search API:
      1. a keyword/hashtag search (``MainDF['hashtag'][c]``), skipping retweets;
      2. the company's own timeline (``MainDF['twitter'][c]``), English only,
         capped at ``max_feeds`` tweets.

    Results are stored as a DataFrame in the module-level ``tweets_datasets``
    dict (keyed by company name) with columns
    ['Category', 'Company', 'Source/User', 'Title/Tweet', 'Date', 'Link'],
    and the company name is appended to the module-level
    ``tw_current_companies`` list.

    Side effects only; returns None. Any TwitterSearchException is caught
    and printed, aborting the remaining companies.
    """
    try:
        max_feeds = 10  # cap on tweets taken from each company timeline

        # One search order is reused across companies; only the keywords are
        # swapped inside the loop (matches the original behavior).
        tso = TwitterSearchOrder()
        tso.set_language('en')
        tso.set_include_entities(False)  # skip entity metadata
        tso.set_until(new_date)          # module-level cutoff date — TODO confirm
        tso.arguments.update({'tweet_mode': 'extended'})
        tso.arguments.update({'truncated': 'False'})

        # NOTE(review): credentials are empty placeholders; real values must
        # come from configuration, never be committed.
        ts = TwitterSearch(consumer_key='',
                           consumer_secret='',
                           access_token='',
                           access_token_secret='',
                           proxy='http://proxy_address')

        for c in range(len(MainDF)):
            company = MainDF['company'][c]

            tso.set_keywords(MainDF['hashtag'][c])
            tweets_list = []

            # Per-company timeline order.
            tuo = TwitterUserOrder(MainDF['twitter'][c])
            tuo.set_include_entities(False)
            tuo.arguments.update({'tweet_mode': 'extended'})
            tuo.arguments.update({'truncated': 'False'})

            # --- hashtag search: keep originals only, drop retweets ---
            for tweet in ts.search_tweets_iterable(tso):
                if 'retweeted_status' in tweet:
                    continue
                links = ', '.join(Find(tweet['full_text']))
                tweets_list.append([
                    company, tweet['user']['screen_name'],
                    tweet['full_text'], tweet['created_at'], links
                ])

            # --- own timeline: English tweets only, at most max_feeds ---
            count = 0
            for tweet in ts.search_tweets_iterable(tuo):
                if tweet['lang'] != 'en':
                    continue
                links = ', '.join(Find(tweet['full_text']))
                tweets_list.append([
                    company, tweet['user']['screen_name'],
                    tweet['full_text'], tweet['created_at'], links
                ])
                count += 1
                if count == max_feeds:
                    break

            if tweets_list:
                df = pd.DataFrame(
                    tweets_list,
                    columns=['Company', 'Source/User', 'Title/Tweet',
                             'Date', 'Link'])
                df.insert(0, 'Category', 'Twitter')
                # Parse the raw created_at strings into date objects in one
                # vectorized pass (avoids the chained-assignment pattern
                # df['Date'][i] = ..., which pandas flags as unreliable).
                df['Date'] = df['Date'].map(lambda d: parse(d).date())

                tweets_datasets[company] = df
                tw_current_companies.append(company)

    except TwitterSearchException as e:  # API/auth/network errors from the library
        print(e)
Beispiel #22
0
import json
import sys
import urllib2
from collections import defaultdict
from pprint import pprint

import indicoio
from TwitterSearch import TwitterSearchOrder

import twitter_demo.settings as settings
from twitter_demo.twitter import TwitterClient

# NOTE(review): hardcoded API key committed to source — move to an environment
# variable or settings module and rotate the key.
indicoio.config.api_key = "2e559e40a6ebea9dddb39e753363f6e1"

# Search keywords come from the command line: every argument is a keyword.
query_string = sys.argv[1:]
query = TwitterSearchOrder()
query.set_keywords(query_string)
query.set_language('en')
query.set_include_entities(False)  # skip entity metadata
results = TwitterClient.search_tweets(query)

# POST each matching tweet's id/text to the ingest endpoint as JSON and keep
# a local copy of every payload sent.
tweets = []

for tweet in results['content']['statuses']:
    data = {'tweet_id': tweet['id_str'], 'tweet_text': tweet['text']}
    req = urllib2.Request('http://45.33.90.42:5000/insert_data')
    req.add_header('Content-Type', 'application/json')
    # `json` was referenced here but never imported (NameError at runtime);
    # the import is now added at the top of the file.
    urllib2.urlopen(req, json.dumps(data))
    tweets.append(data)
Beispiel #23
0
def get_tweets():
    #run: ./manage.py shell -c="from tweets.utils import get_tweets; get_tweets()"

    #keywords = ["arrombada", "arrombado", "asno", "acefalo","babaca", "babuino", "baitola", "biba", "bicha", "bixa", "bixinha", "bobo", "boceta", "boquete", "borra", "bosta", "buceta", "boceta", "bundao", "burro", "cacete", "cadela", "cagar", "cala", "cale", "caralho", "caralio", "chupe", "come", "corno", "cu", "cusao", "cuzao", "desgracado", "disgracado", "egua", "enraba", "fdp", "fiderapariga", "fidumaegua", "filhodaputa", "filhodeumaputa", "foda", "fodase", "foder", "fuder", "fudeu", "fudido", "gay", "grelo", "idiota", "inferno", "jegue", "louco", "macaco", "mamar", "marica", "merda", "mijao", "otario", "pariu", "pau", "peidar", "pica", "pinto", "piriguete", "piroca", "piru", "porra", "puta", "quinto", "rapariga", "retardado", "rola", "siririca", "tesuda", "tomar", "vagabundo", "vaite", "veado", "velha", "viado", "xereca"]
    #keywords = ['foda se', 'foda-se', 'vai te foder', 'vai se foder', 'vai te fuder', 'vai se foder', 'vai tomar no cú', 'vai tomar no cu', 'vai toma no cú', 'vai toma no cu', 'vai tomar no rabo', 'vai toma no rabo', 'vai dar o cú', 'vai dar o cu', 'vai dar o rabo', 'filho da puta', 'filho da égua', 'filho da puta', 'filho da égua', 'filho de uma puta', 'filho de uma égua', 'filho duma puta', 'filho duma égua']
    #keywords = ['grande', 'bom', 'novo', 'pequeno', 'próprio', 'velho', 'cheio', 'branco', 'longo', 'único', 'alto', 'certo', 'só', 'possível', 'claro', 'capaz', 'estranho', 'negro', 'enorme', 'escuro', 'seguinte', 'mau', 'diferente', 'preciso', 'difícil', 'antigo', 'bonito', 'simples', 'forte', 'pobre', 'sério', 'belo', 'feliz', 'junto', 'vermelho', 'humano', 'inteiro', 'triste', 'importante', 'meio', 'sentado', 'fácil', 'verdadeiro', 'frio', 'vazio', 'baixo', 'terrível', 'próximo', 'livre', 'profundo', 'jovem', 'preto', 'impossível', 'vivo', 'largo', 'nu', 'necessário', 'azul', 'natural', 'quente', 'completo', 'verde', 'pesado', 'inglês', 'especial', 'rápido', 'igual', 'comprido', 'principal', 'breve', 'rico', 'seco', 'fino', 'geral', 'curto', 'chamado', 'pálido', 'leve', 'anterior', 'perfeito', 'grosso', 'direito', 'calado', 'interessante', 'amarelo', 'sujo', 'pronto', 'imenso', 'cansado', 'duro', 'doente', 'puro', 'presente', 'comum', 'brilhante', 'público', 'magro', 'grave', 'cego', 'deitado', 'vago', 'americano', 'alegre', 'real', 'raro', 'particular', 'calmo', 'ansioso', 'social', 'nervoso', 'má', 'silencioso', 'lento', 'semelhante', 'evidente', 'doce', 'curioso', 'normal', 'morto', 'horrível', 'familiar', 'lindo', 'caro', 'justo', 'distante', 'maravilhoso', 'inútil', 'dourado', 'gordo', 'preocupado', 'suave', 'agradável', 'pessoal', 'fraco', 'seguro', 'satisfeito', 'infeliz', 'sombrio', 'feminino', 'súbito', 'estreito', 'íntimo', 'sexual', 'interior', 'esquerdo', 'simpático', 'errado', 'contente', 'secreto', 'fresco', 'violento', 'solitário', 'político', 'moderno', 'extraordinário', 'vulgar', 'literário', 'ligeiro', 'interessado', 'eterno', 'habitual', 'falso', 'castanho', 'parado', 'inteligente', 'firme', 'misterioso', 'delicado', 'cinzento', 'intenso', 'doméstico', 'parecido', 'incapaz', 'profissional', 'disposto', 'absurdo', 'aberto', 'furioso', 'digno', 'grávido', 'amigo', 'quieto', 'físico', 'final', 'famoso', 'elegante', 'diverso', 'redondo', 
'louco', 'local', 'imediato', 'provável', 'irritado', 'absoluto', 'respectivo', 'macio', 'santo', 'prestes', 'feio', 'desesperado', 'colorido', 'ridículo', 'precioso', 'imóvel', 'engraçado', 'útil', 'oposto', 'militar', 'determinado', 'britânico', 'vasto', 'religioso', 'ocupado', 'inquieto', 'impaciente', 'confuso', 'complicado', 'estúpido', 'perigoso', 'frágil', 'fechado', 'espesso', 'decidido', 'recente', 'central', 'alheio', 'agudo', 'surdo', 'minúsculo', 'húmido', 'francês', 'deserto', 'barato', 'sincero', 'sagrado', 'moral', 'luminoso', 'indiferente', 'doido', 'comercial', 'óbvio', 'obscuro', 'magnífico', 'excelente', 'inocente', 'total', 'selvagem', 'louro', 'cor-de-rosa', 'católico', 'amável', 'abafado', 'prático', 'pleno', 'infantil', 'liso', 'espantado', 'vão', 'perdido', 'grato', 'espanhol', 'divertido', 'desagradável', 'delicioso', 'consciente', 'assustado', 'português', 'mágico', 'mental', 'inferior', 'generoso', 'encantador', 'atento', 'invisível', 'industrial', 'exterior', 'estrangeiro', 'derradeiro', 'cruel', 'rígido', 'poderoso', 'modesto', 'italiano', 'gelado', 'farto', 'trémulo', 'oculto', 'nobre', 'infinito', 'inesperado', 'franco', 'encantado', 'amoroso', 'úmido', 'visível', 'severo', 'original', 'orgulhoso', 'mortal', 'esplêndido', 'coitado', 'subtil', 'constante', 'surpreendente', 'solene', 'médio', 'mudo', 'futuro', 'eléctrico', 'distinto', 'demasiado', 'culpado', 'confortável', 'banal', 'regular', 'razoável', 'molhado', 'esquisito', 'amplo', 'universitário', 'moreno', 'masculino', 'honesto', 'elevado', 'divino', 'discreto', 'confiante', 'aflito', 'zangado', 'tranquilo', 'surpreendido', 'preparado', 'miúdo', 'maldito', 'judeu', 'interno', 'insignificante', 'incrível', 'excitado', 'exacto', 'esperto', 'descalço', 'casado', 'ardente', 'típico', 'transparente', 'sólido', 'mero', 'humilde', 'doloroso', 'aéreo', 'autêntico', 'urgente', 'tenso', 'surpreso', 'sereno', 'sensível', 'querido', 'fatal', 'embaraçado', 'ruim', 'legal', 'fundo', 
'contrário', 'chato', 'ausente', 'tímido', 'sinistro', 'privado', 'pensativo', 'melancólico', 'envergonhado', 'denso', 'definitivo', 'considerável', 'atrasado', 'assustador', 'artístico', 'tremendo', 'ruivo', 'mole', 'miserável', 'irritante', 'desconfiado', 'adequado', 'áspero', 'trêmulo', 'responsável', 'nítido', 'monstruoso', 'manso', 'fantástico', 'essencial', 'crescente', 'científico', 'bêbado', 'amargo', 'aliviado', 'afastado', 'telefónico', 'romântico', 'espiritual', 'cristão', 'brusco', 'apressado', 'remoto', 'insuportável', 'inicial', 'grosseiro', 'dramático', 'decente', 'admirável', 'acordado', 'sossegado', 'solteiro', 'singular', 'rouco', 'nocturno', 'loiro', 'judaico', 'irlandês', 'indignado', 'histórico', 'gracioso', 'admirado', 'supremo', 'solto', 'internacional', 'europeu', 'deprimido', 'brasileiro', 'atraente', 'animado', 'viúvo', 'ténue', 'terno', 'tamanho', 'ruidoso', 'relativo', 'penetrante', 'ordinário', 'numeroso', 'japonês', 'inúmero', 'interminável', 'ilustre', 'hesitante', 'gentil', 'feroz', 'barbudo', 'apaixonado', 'académico', 'aborrecido', 'tranqüilo', 'tradicional', 'popular', 'perturbado', 'oficial', 'limpo', 'inevitável', 'grisalho', 'extremo', 'desajeitado', 'complexo', 'artificial', 'abatido', 'virgem', 'valioso', 'sensato', 'risonho', 'piedoso', 'morno', 'metálico', 'incerto', 'idêntico', 'grego', 'gigantesco', 'fiel', 'excitante', 'esguio', 'célebre', 'apertado', 'vizinho', 'vacilante', 'universal', 'saudável', 'repleto', 'perpétuo', 'pardo', 'ofegante', 'mútuo', 'lustroso', 'isolado', 'inchado', 'gasto', 'formal', 'fascinado', 'exausto', 'escasso', 'dito', 'distraído', 'desportivo', 'contínuo', 'competente', 'clássico', 'civil', 'adulto', 'adormecido', 'técnico', 'suspeito', 'sensual', 'rigoroso', 'notável', 'irregular', 'idoso', 'excessivo', 'exato', 'espantoso', 'escolar', 'erótico', 'enganado', 'bastante', 'agitado', 'actual', 'absorto', 'teatral', 'roxo', 'robusto', 'reluzente', 'nacional', 'médico', 'moço', 'legítimo', 
'idiota', 'frequente', 'conveniente', 'conhecido', 'chocado', 'brando', 'apinhado', 'tonto', 'sentimental', 'rubro', 'rosado', 'primitivo', 'ocasional', 'lateral', 'intelectual', 'horrorizado', 'devido', 'desgraçado', 'demorado', 'corrente', 'automático', 'triunfante', 'soturno', 'sorridente', 'radical', 'policial', 'perturbador', 'perplexo', 'perfumado', 'ocidental', 'monótono', 'lúgubre', 'intacto', 'indiano', 'impressionado', 'hebraico', 'exagerado', 'escarlate', 'devoto', 'constrangido', 'carregado', 'bizarro', 'aparente', 'variado', 'trágico', 'maduro', 'irónico', 'injusto', 'indeciso', 'ideal', 'heróico', 'favorável', 'débil', 'cultural', 'corajoso', 'concentrado', 'comovido', 'clandestino', 'ameaçador', 'alvo', 'vibrante', 'turístico', 'temporário', 'simbólico', 'sarcástico', 'respeitável', 'repentino', 'quadrado', 'preferível', 'ousado', 'oriental', 'negativo', 'lívido', 'límpido', 'insensível', 'grandioso', 'fatigado', 'existente', 'escondido', 'característico', 'alemão', 'sábio', 'sonoro', 'significativo', 'secundário', 'rude', 'repugnante', 'recto', 'prateado', 'podre', 'murcho', 'medieval', 'longínquo', 'intrigado', 'imperioso', 'hábil', 'formoso', 'fascinante', 'extenso', 'estridente', 'específico', 'embaraçoso', 'diário', 'disponível', 'cômico', 'cru', 'correcto', 'convencido', 'careca', 'agressivo', 'africano', 'vagaroso', 'sórdido', 'suado', 'satisfatório', 'prudente', 'obstante', 'natal', 'mesquinho', 'mecânico', 'leal', 'impressionante', 'implacável', 'horroroso', 'frouxo', 'formidável', 'fixo', 'enfadonho', 'emocional', 'económico', 'disperso', 'desvairado', 'desolado', 'dado', 'cintilante', 'afável', 'ágil', 'vistoso', 'traseiro', 'tolo', 'tardio', 'são', 'russo', 'receoso', 'prolongado', 'prodigioso', 'privilegiado', 'odioso', 'meigo', 'lógico', 'irresistível', 'intolerável', 'infernal', 'indispensável', 'habituado', 'fundamental', 'extra', 'espetado', 'escusado', 'egoísta', 'duvidoso', 'descuidado', 'convencional', 'conseguinte', 
'confidencial', 'condicionado', 'cheiroso', 'cauteloso', 'atual', 'anual', 'aceitável', 'vário', 'vaidoso', 'trivial', 'tosco', 'rosa', 'ritual', 'respeitoso', 'reservado', 'propício', 'postiço', 'positivo', 'pequenino', 'pavoroso', 'patético', 'paciente', 'ondulante', 'nojento', 'musical', 'minucioso', 'maluco', 'improvável', 'glorioso', 'gigante', 'extravagante', 'exclusivo', 'esbranquiçado', 'enérgico', 'enrugado', 'direto', 'crítico', 'corpulento', 'básico', 'brutal', 'atarefado', 'arrumado', 'animal', 'ávido', 'vitoriano', 'tranquilizador', 'sonolento', 'reduzido', 'radioso', 'psicológico', 'primário', 'preferido', 'múltiplo', 'municipal', 'matinal', 'liberal', 'jovial', 'inofensivo', 'inexplicável', 'inclinado', 'iminente', 'flexível', 'financeiro', 'febril', 'entusiasmado', 'eletrônico', 'eficiente', 'directo', 'diplomático', 'diabólico', 'crucial', 'circular', 'bem-vindo', 'avermelhado', 'aterrorizado', 'aterrado', 'assírio', 'assente', 'apagado', 'ameno', 'amarelado', 'abundante', 'abjecto', 'vulnerável', 'volumoso', 'vertical', 'tácito', 'salgado', 'rasgado', 'perverso', 'permanente', 'peculiar', 'noturno', 'mórbido', 'momentâneo', 'memorável', 'maternal', 'lúcido', 'individual', 'indefinido', 'imponente', 'imperial', 'impecável', 'impassível', 'imaginário', 'hediondo', 'genial', 'fúnebre', 'exigente', 'festivo']
    #keywords = ["abbo", "abo", "abortion", "abuse", "addict", "addicts", "adult", "africa", "african", "alla", "allah", "alligatorbait", "amateur", "american", "anal", "analannie", "analsex", "angie", "angry", "anus", "arab", "arabs", "areola", "argie", "aroused", "arse", "arsehole", "asian", "ass", "assassin", "assassinate", "assassination", "assault", "assbagger", "assblaster", "assclown", "asscowboy", "asses", "assfuck", "assfucker", "asshat", "asshole", "assholes", "asshore", "assjockey", "asskiss", "asskisser", "assklown", "asslick", "asslicker", "asslover", "assman", "assmonkey", "assmunch", "assmuncher", "asspacker", "asspirate", "asspuppies", "assranger", "asswhore", "asswipe", "athletesfoot", "attack", "australian", "babe", "babies", "backdoor", "backdoorman", "backseat", "badfuck", "balllicker", "balls", "ballsack", "banging", "baptist", "barelylegal", "barf", "barface", "barfface", "bast", "bastard ", "bazongas", "bazooms", "beaner", "beast", "beastality", "beastial", "beastiality", "beatoff", "beat-off", "beatyourmeat", "beaver", "bestial", "bestiality", "bi", "biatch", "bible", "bicurious", "bigass", "bigbastard", "bigbutt", "bigger", "bisexual", "bi-sexual", "bitch", "bitcher", "bitches", "bitchez", "bitchin", "bitching", "bitchslap", "bitchy", "biteme", "black", "blackman", "blackout", "blacks", "blind", "blow", "b*****b", "boang", "bogan", "bohunk", "bollick", "bollock", "bomb", "bombers", "bombing", "bombs", "bomd", "bondage", "boner", "bong", "boob", "boobies", "boobs", "booby", "boody", "boom", "boong", "boonga", "boonie", "booty", "bootycall", "bountybar", "bra", "brea5t", "breast", "breastjob", "breastlover", "breastman", "brothel", "bugger", "buggered", "buggery", "bullcrap", "bulldike", "bulldyke", "bullshit", "bumblefuck", "bumfuck", "bunga", "bunghole", "buried", "burn", "butchbabes", "butchdike", "butchdyke", "butt", "buttbang", "butt-bang", "buttface", "buttfuck", "butt-f**k", "buttfucker", "butt-f****r", "buttfuckers", "butt-fuckers", 
"butthead", "buttman", "buttmunch", "buttmuncher", "buttpirate", "b******g", "buttstain", "byatch", "cacker", "cameljockey", "cameltoe", "canadian", "cancer", "carpetmuncher", "carruth", "catholic", "catholics", "cemetery", "chav", "cherrypopper", "chickslick", "children's", "chin", "chinaman", "chinamen", "chinese", "chink", "chinky", "choad", "chode", "christ", "christian", "church", "cigarette", "cigs", "clamdigger", "clamdiver", "c**t", "clitoris", "clogwog", "cocaine", "c**k", "cockblock", "cockblocker", "cockcowboy", "cockfight", "cockhead", "cockknob", "cocklicker", "cocklover", "cocknob", "cockqueen", "cockrider", "cocksman", "cocksmith", "cocksmoker", "cocksucer", "cocksuck ", "cocksucked ", "c********r", "cocksucking", "cocktail", "cocktease", "cocky", "cohee", "coitus", "color", "colored", "coloured", "commie", "communist", "condom", "conservative", "conspiracy", "coolie", "cooly", "coon", "coondog", "copulate", "cornhole", "corruption", "cra5h", "crabs", "crack", "crackpipe", "crackwhore", "crack-w***e", "crap", "crapola", "crapper", "crappy", "crash", "creamy", "crime", "crimes", "criminal", "criminals", "crotch", "crotchjockey", "crotchmonkey", "crotchrot", "cum", "cumbubble", "cumfest", "cumjockey", "cumm", "cummer", "cumming", "cumquat", "cumqueen", "cumshot", "cunilingus", "cunillingus", "cunn", "cunnilingus", "cunntt", "c**t", "cunteyed", "cuntfuck", "cuntfucker", "cuntlick ", "cuntlicker ", "cuntlicking ", "cuntsucker", "cybersex", "cyberslimer", "dago", "dahmer", "dammit", "damn", "damnation", "damnit", "darkie", "darky", "datnigga", "dead", "deapthroat", "death", "deepthroat", "defecate", "dego", "demon", "deposit", "desire", "destroy", "deth", "devil", "devilworshipper", "dick", "dickbrain", "dickforbrains", "dickhead", "dickless", "dicklick", "dicklicker", "dickman", "dickwad", "dickweed", "diddle", "die", "died", "dies", "dike", "d***o", "dingleberry", "dink", "dipshit", "dipstick", "dirty", "disease", "diseases", "disturbed", "dive", "dix", 
"dixiedike", "dixiedyke", "doggiestyle", "doggystyle", "dong", "doodoo", "doo-doo", "doom", "dope", "dragqueen", "dragqween", "dripdick", "drug", "drunk", "drunken", "dumb", "dumbass", "dumbbitch", "dumbfuck", "dyefly", "dyke", "easyslut", "eatballs", "eatme", "eatpussy", "ecstacy", "e*******e", "ejaculated", "ejaculating ", "e*********n", "enema", "enemy", "erect", "erection", "ero", "escort", "ethiopian", "ethnic", "european", "evl", "excrement", "execute", "executed", "execution", "executioner", "explosion", "facefucker", "faeces", "f*g", "fagging", "f****t", "fagot", "failed", "failure", "fairies", "fairy", "faith", "fannyfucker", "fart", "farted ", "farting ", "farty ", "fastfuck", "fat", "fatah", "fatass", "fatfuck", "fatfucker", "fatso", "fckcum", "fear", "feces", "felatio ", "felch", "felcher", "f******g", "f******o", "feltch", "feltcher", "feltching", "fetish", "fight", "filipina", "filipino", "fingerfood", "fingerfuck ", "fingerfucked ", "fingerfucker ", "fingerfuckers", "fingerfucking ", "fire", "firing", "fister", "fistfuck", "fistfucked ", "fistfucker ", "fistfucking ", "fisting", "flange", "flasher", "flatulence", "floo", "flydie", "flydye", "fok", "fondle", "footaction", "footfuck", "footfucker", "footlicker", "footstar", "fore", "foreskin", "forni", "fornicate", "foursome", "fourtwenty", "fraud", "freakfuck", "freakyfucker", "freefuck", "fu", "fubar", "fuc", "fucck", "f**k", "f***a", "fuckable", "fuckbag", "fuckbuddy", "f****d", "fuckedup", "f****r", "fuckers", "fuckface", "fuckfest", "fuckfreak", "fuckfriend", "fuckhead", "fuckher", "f****n", "fuckina", "f*****g", "fuckingbitch", "fuckinnuts", "fuckinright", "fuckit", "fuckknob", "f****e ", "fuckmehard", "fuckmonkey", "fuckoff", "fuckpig", "f***s", "fucktard", "fuckwhore", "fuckyou", "f*********r", "fugly", "fuk", "fuks", "funeral", "funfuck", "fungus", "fuuck", "g******g", "gangbanged ", "gangbanger", "gangsta", "gatorbait", "gay", "gaymuthafuckinwhore", "gaysex ", "geez", "geezer", "geni", 
"genital", "german", "getiton", "gin", "ginzo", "gipp", "girls", "givehead", "glazeddonut", "gob", "god", "godammit", "goddamit", "goddammit", "goddamn", "goddamned", "goddamnes", "goddamnit", "goddamnmuthafucker", "goldenshower", "gonorrehea", "gonzagas", "gook", "gotohell", "goy", "goyim", "greaseball", "gringo", "groe", "gross", "grostulation", "gubba", "gummer", "gun", "gyp", "gypo", "gypp", "gyppie", "gyppo", "gyppy", "hamas", "handjob", "hapa", "harder", "hardon", "harem", "headfuck", "headlights", "hebe", "heeb", "hell", "henhouse", "heroin", "herpes", "heterosexual", "hijack", "hijacker", "hijacking", "hillbillies", "hindoo", "hiscock", "hitler", "hitlerism", "hitlerist", "hiv", "ho", "hobo", "hodgie", "hoes", "hole", "holestuffer", "homicide", "h**o", "homobangers", "homosexual", "honger", "honk", "honkers", "honkey", "honky", "hook", "hooker", "hookers", "hooters", "hore", "hork", "horn", "horney", "horniest", "horny", "horseshit", "hosejob", "hoser", "hostage", "hotdamn", "hotpussy", "hottotrot", "hummer", "husky", "hussy", "hustler", "hymen", "hymie", "iblowu", "idiot", "ikey", "illegal", "incest", "insest", "intercourse", "interracial", "intheass", "inthebuff", "israel", "israeli", "israel's", "italiano", "itch", "jackass", "jackoff", "jackshit", "jacktheripper", "jade", "jap", "japanese", "japcrap", "jebus", "jeez", "jerkoff", "jesus", "jesuschrist", "jew", "jewish", "jiga", "jigaboo", "jigg", "jigga", "jiggabo", "jigger ", "jiggy", "jihad", "jijjiboo", "jimfish", "jism", "j*z ", "jizim", "jizjuice", "jizm ", "j**z", "jizzim", "jizzum", "joint", "juggalo", "jugs", "junglebunny", "kaffer", "kaffir", "kaffre", "kafir", "kanake", "kid", "kigger", "kike", "kill", "killed", "killer", "killing", "kills", "kink", "kinky", "kissass", "kkk", "knife", "knockers", "kock", "kondum", "koon", "kotex", "krap", "krappy", "kraut", "kum", "kumbubble", "kumbullbe", "kummer", "kumming", "kumquat", "kums", "kunilingus", "kunnilingus", "kunt", "ky", "kyke", "lactate", 
"laid", "lapdance", "latin", "lesbain", "lesbayn", "lesbian", "lesbin", "lesbo", "lez", "lezbe", "lezbefriends", "lezbo", "lezz", "lezzo", "liberal", "libido", "licker", "lickme", "lies", "limey", "limpdick", "limy", "lingerie", "liquor", "livesex", "loadedgun", "lolita", "looser", "loser", "lotion", "lovebone", "lovegoo", "lovegun", "lovejuice", "lovemuscle", "lovepistol", "loverocket", "lowlife", "lsd", "lubejob", "lucifer", "luckycammeltoe", "lugan", "lynch", "macaca", "mad", "mafia", "magicwand", "mams", "manhater", "manpaste", "marijuana", "mastabate", "mastabater", "masterbate", "masterblaster", "mastrabator", "m********e", "masturbating", "mattressprincess", "meatbeatter", "meatrack", "meth", "mexican", "mgger", "mggor", "mickeyfinn", "mideast", "milf", "minority", "mockey", "mockie", "mocky", "mofo", "moky", "moles", "molest", "molestation", "molester", "molestor", "moneyshot", "mooncricket", "mormon", "moron", "moslem", "mosshead", "mothafuck", "m********a", "mothafuckaz", "mothafucked ", "mothafucker", "mothafuckin", "mothafucking ", "mothafuckings", "motherfuck", "motherfucked", "m**********r", "motherfuckin", "motherfucking", "motherfuckings", "motherlovebone", "muff", "muffdive", "muffdiver", "muffindiver", "mufflikcer", "mulatto", "muncher", "munt", "murder", "murderer", "muslim", "naked", "narcotic", "nasty", "nastybitch", "nastyho", "nastyslut", "nastywhore", "nazi", "necro", "negro", "negroes", "negroid", "negro's", "nig", "niger", "nigerian", "nigerians", "nigg", "n***a", "niggah", "niggaracci", "niggard", "niggarded", "niggarding", "niggardliness", "niggardliness's", "niggardly", "niggards", "niggard's", "niggaz", "nigger", "niggerhead", "niggerhole", "niggers", "nigger's", "niggle", "niggled", "niggles", "niggling", "nigglings", "niggor", "niggur", "niglet", "nignog", "nigr", "nigra", "nigre", "nip", "nipple", "nipplering", "nittit", "nlgger", "nlggor", "nofuckingway", "nook", "nookey", "nookie", "noonan", "nooner", "nude", "nudger", "nuke", 
"nutfucker", "nymph", "ontherag", "oral", "orga", "orgasim ", "o****m", "orgies", "orgy", "osama", "paki", "palesimian", "palestinian", "pansies", "pansy", "panti", "panties", "payo", "pearlnecklace", "peck", "pecker", "peckerwood", "pee", "peehole", "pee-pee", "peepshow", "peepshpw", "pendy", "penetration", "peni5", "penile", "penis", "penises", "penthouse", "period", "perv", "phonesex", "phuk", "phuked", "phuking", "phukked", "phukking", "phungky", "phuq", "pi55", "picaninny", "piccaninny", "pickaninny", "piker", "pikey", "piky", "pimp", "pimped", "pimper", "pimpjuic", "pimpjuice", "pimpsimp", "pindick", "piss", "pissed", "pisser", "pisses ", "pisshead", "pissin ", "pissing", "pissoff ", "pistol", "pixie", "pixy", "playboy", "playgirl", "pocha", "pocho", "pocketpool", "pohm", "polack", "pom", "pommie", "pommy", "poo", "poon", "poontang", "poop", "pooper", "pooperscooper", "pooping", "poorwhitetrash", "popimp", "porchmonkey", "p**n", "pornflick", "pornking", "porno", "pornography", "pornprincess", "pot", "poverty", "premature", "pric", "prick", "prickhead", "primetime", "propaganda", "pros", "prostitute", "protestant", "pu55i", "pu55y", "pube", "pubic", "pubiclice", "pud", "pudboy", "pudd", "puddboy", "puke", "puntang", "purinapricness", "puss", "pussie", "pussies", "pussy", "pussycat", "pussyeater", "pussyfucker", "pussylicker", "pussylips", "pussylover", "pussypounder", "pusy", "quashie", "queef", "queer", "quickie", "quim", "ra8s", "rabbi", "racial", "racist", "radical", "radicals", "raghead", "randy", "rape", "raped", "raper", "rapist", "rearend", "rearentry", "rectum", "redlight", "redneck", "reefer", "reestie", "refugee", "reject", "remains", "rentafuck", "republican", "rere", "retard", "retarded", "ribbed", "rigger", "rimjob", "rimming", "roach", "robber", "roundeye", "rump", "russki", "russkie", "sadis", "sadom", "samckdaddy", "sandm", "sandnigger", "satan", "scag", "scallywag", "scat", "schlong", "screw", "screwyou", "scrotum", "scum", "s***n", "seppo", 
"servant", "sex", "sexed", "sexfarm", "sexhound", "sexhouse", "sexing", "sexkitten", "sexpot", "sexslave", "sextogo", "sextoy", "sextoys", "sexual", "sexually", "sexwhore", "sexy", "sexymoma", "sexy-slim", "shag", "shaggin", "shagging", "shat", "shav", "shawtypimp", "sheeney", "shhit", "shinola", "shit", "shitcan", "shitdick", "s***e", "shiteater", "shited", "shitface", "shitfaced", "shitfit", "shitforbrains", "shitfuck", "shitfucker", "shitfull", "shithapens", "shithappens", "s******d", "shithouse", "shiting", "shitlist", "shitola", "shitoutofluck", "shits", "shitstain", "s*****d", "shitter", "s******g", "shitty ", "shoot", "shooting", "shortfuck", "showtime", "sick", "sissy", "sixsixsix", "sixtynine", "sixtyniner", "skank", "skankbitch", "skankfuck", "skankwhore", "skanky", "skankybitch", "skankywhore", "skinflute", "skum", "skumbag", "slant", "slanteye", "slapper", "slaughter", "slav", "slave", "slavedriver", "sleezebag", "sleezeball", "slideitin", "slime", "slimeball", "slimebucket", "slopehead", "slopey", "slopy", "s**t", "s***s", "slutt", "slutting", "slutty", "slutwear", "slutwhore", "smack", "smackthemonkey", "smut", "snatch", "snatchpatch", "snigger", "sniggered", "sniggering", "sniggers", "snigger's", "sniper", "snot", "snowback", "snownigger", "sob", "sodom", "sodomise", "sodomite", "sodomize", "sodomy", "sonofabitch", "sonofbitch", "sooty", "sos", "soviet", "spaghettibender", "spaghettinigger", "spank", "spankthemonkey", "sperm", "spermacide", "spermbag", "spermhearder", "spermherder", "spic", "spick", "spig", "spigotty", "spik", "spit", "spitter", "splittail", "spooge", "spreadeagle", "spunk", "spunky", "squaw", "stagg", "stiffy", "strapon", "stringer", "stripclub", "stroke", "stroking", "stupid", "stupidfuck", "stupidfucker", "suck", "suckdick", "sucker", "suckme", "suckmyass", "suckmydick", "suckmytit", "suckoff", "suicide", "swallow", "swallower", "swalow", "swastika", "sweetness", "syphilis", "taboo", "taff", "tampon", "tang", "tantra", "tarbaby", 
"tard", "teat", "terror", "terrorist", "teste", "testicle", "testicles", "thicklips", "thirdeye", "thirdleg", "threesome", "threeway", "timbernigger", "tinkle", "tit", "titbitnipply", "titfuck", "titfucker", "titfuckin", "titjob", "titlicker", "titlover", "t**s", "tittie", "titties", "titty", "tnt", "toilet", "tongethruster", "tongue", "tonguethrust", "tonguetramp", "tortur", "torture", "tosser", "towelhead", "trailertrash", "tramp", "trannie", "tranny", "transexual", "transsexual", "transvestite", "triplex", "trisexual", "trojan", "trots", "tuckahoe", "tunneloflove", "turd", "turnon", "twat", "twink", "twinkie", "twobitwhore", "uck", "uk", "unfuckable", "upskirt", "uptheass", "upthebutt", "urinary", "urinate", "urine", "usama", "uterus", "v****a", "vaginal", "vatican", "vibr", "vibrater", "vibrator", "vietcong", "violence", "virgin", "virginbreaker", "vomit", "vulva", "wab", "wank", "wanker", "wanking", "waysted", "weapon", "weenie", "weewee", "welcher", "welfare", "wetb", "wetback", "wetspot", "whacker", "whash", "whigger", "whiskey", "whiskeydick", "whiskydick", "whit", "whitenigger", "whites", "whitetrash", "whitey", "whiz", "whop", "w***e", "whorefucker", "whorehouse", "wigger", "willie", "williewanker", "willy", "wn", "wog", "women's", "wop", "wtf", "wuss", "wuzzie", "xtc", "xxx", "yankee", "yellowman", "zigabo", "zipperhead"]
    #keywords = ['adorable', 'beautiful', 'clean', 'drab', 'elegant', 'fancy', 'glamorous', 'handsome', 'long', 'magnificent', 'old-fashioned', 'plain', 'quaint', 'sparkling', 'ugliest', 'unsightly', 'wide-eyed', 'red', 'orange', 'yellow', 'green', 'blue', 'purple', 'gray', 'black', 'white', 'alive', 'better', 'careful', 'clever', 'dead', 'easy', 'famous', 'gifted', 'helpful', 'important', 'inexpensive', 'mushy', 'odd', 'powerful', 'rich', 'shy', 'tender', 'uninterested', 'vast', 'wrong', 'angry', 'bewildered', 'clumsy', 'defeated', 'embarrassed', 'fierce', 'grumpy', 'helpless', 'itchy', 'jealous', 'lazy', 'mysterious', 'nervous', 'obnoxious', 'panicky', 'repulsive', 'scary', 'thoughtless', 'uptight', 'worried', 'agreeable', 'brave', 'calm', 'delightful', 'eager', 'faithful', 'gentle', 'happy', 'jolly', 'kind', 'lively', 'nice', 'obedient', 'proud', 'relieved', 'silly', 'thankful', 'victorious', 'witty', 'zealous', 'broad', 'chubby', 'crooked', 'curved', 'deep', 'flat', 'high', 'hollow', 'low', 'narrow', 'round', 'shallow', 'skinny', 'square', 'steep', 'straight', 'wide', 'big', 'colossal', 'fat', 'gigantic', 'great', 'huge', 'immense', 'large', 'little', 'mammoth', 'massive', 'miniature', 'petite', 'puny', 'scrawny', 'short', 'small', 'tall', 'teeny', 'teeny-tiny', 'tiny', 'cooing', 'deafening', 'faint', 'hissing', 'loud', 'melodic', 'noisy', 'purring', 'quiet', 'raspy', 'screeching', 'thundering', 'voiceless', 'whispering', 'ancient', 'brief', 'early', 'fast', 'late', 'long', 'modern', 'old', 'old-fashioned', 'quick', 'rapid', 'short', 'slow', 'swift', 'young', 'bitter', 'delicious', 'fresh', 'greasy', 'juicy', 'hot', 'icy', 'loose', 'melted', 'nutritious', 'prickly', 'rainy', 'rotten', 'salty', 'sticky', 'strong', 'sweet', 'tart', 'tasteless', 'uneven', 'weak', 'wet', 'wooden', 'yummy', 'boiling', 'breeze', 'broken', 'bumpy', 'chilly', 'cold', 'cool', 'creepy', 'crooked', 'cuddly', 'curly', 'damaged', 'damp', 'dirty', 'dry', 'dusty', 'filthy', 'flaky', 'fluffy', 
'freezing', 'hot', 'warm', 'wet', 'abundant', 'empty', 'few', 'full', 'heavy', 'light', 'many', 'numerous', 'sparse', 'substantial']
    #keywords = ['Du Hurensohn', 'Arschfotze', 'Arschgeige', 'Arschgesicht', 'Arschloch', 'Bloede Kuh', 'Bulle', 'Das ist mir scheißegal', 'Das war zu einfach', 'Deine Mutter schwitzt beim Kacken', 'Deine Mutter', 'Depp', 'Die Möpse', 'Dreckige Hure', 'Drecksau', 'Du Bastard', 'Du Drecksack', 'Du Fickfehler', 'Du Muschi', 'Du Schweinehund', 'Du Weichei', 'Du alte Kackbratze', 'Du arschgefickter Hurensohn', 'Du bist ein volltrottel', 'Du hässliger Ziegenficker', 'Du kannst mich mal', 'Du verdammtes Arschloch', 'Dumme Schlampe', 'Dummes Huhn', 'Dummkopf', 'Fick deine Mutter', 'Fick dich Arschloch', 'Fick dich ins Knie', 'Fick dich', 'Ficker', 'Fotze', 'Geh Staub fressen', 'Geh zum Teufel', 'Geh kacken', 'Gottverdammt ', 'Halt deine Fresse', 'Halt die Fresse', 'Hirnlose Ochse', 'Huhrensohn', 'Hurensohn', 'Ich Liebe Dich', 'Ich ficke deine Schwester', 'Ich hasse dich', 'Ich will dich ficken', 'Ich will ficken', 'Kackbratze', 'LMS', 'Lutsch mein Schwanz', 'Leck mich am Arsch', 'Luder', 'Lutsch meine Eier', 'Mieser Stricher', 'Mutterficker', 'Nutle', 'Nuttensohn', 'Onanieren', 'Scheissen', 'Scheiße', 'Scheißhaus', 'Schlampe', 'Schwanzlutscher', 'Schweinepriester', 'Schwuchtel', 'Schwul', 'Sheisse', 'Trottel', 'Veganer', 'Verdammte Scheiße', 'Verpiss dich', 'Wichser', 'Wixer', 'Zeig mir deine Pflaume', 'Zicke', 'Zickig', 'blutige Sau', 'bumsen', 'das Arschloch', 'der Arsch', 'der Schwanz', 'der Teufel', 'der abschaum', 'der abschaum der menschlichen gesellschaft', 'der drecksack', 'der dreckskerl', 'der schwanz', 'der schwanzlutscher', 'die Fotze', 'die Hure', 'die Scheiße', 'die Schlampe', 'die Titten', 'du blöde stinkfotze', 'du dumme kuh', 'du verdammter Arschficker', 'dumme Kuh', 'duncauf', 'fahr zur holle', 'fick dich', 'fickdich', 'homofuerst', 'ich fick deine mutter', 'ich will dich ficken', 'missgeburt', 'muschi lecker', 'verdammt du hurensohn', 'verpiss dich', 'voegeln', 'vögeln', 'wichser', 'zur Holle mit dir']
    #keywords += ['Phrase', 'abenteuerlich', 'abhängig', 'abwesend', 'aggressiv', 'ahnungslos', 'aktiv', 'allein', 'altmodisch', 'anpassungsfähig', 'anständig', 'ärgerlich', 'arm', 'arrogant', 'attraktiv', 'ätzend', 'aufgeklärt', 'aufgeregt', 'aufgeschlossen', 'aufrichtig', 'ausgeflippt', 'begabt', 'begeistert', 'bekannt', 'berühmt', 'beliebt', 'populär', 'bequem', 'bescheiden', 'bescheuert', 'bezaubernd', 'billig', 'blöd', 'boshaft', 'brav', 'charmant', 'cool', 'dankbar', 'dick', 'dumm', 'doof', 'dünn', 'egozentrisch', 'ehrgeizig', 'ehrlich', 'eifersüchtig', 'einfach', 'eingebildet', 'einmalig', 'einsam', 'einverstanden', 'ekelhaft', 'eklig', 'elegant', 'empfindlich', 'engagiert', 'engstirnig', 'erfolgreich', 'ernst', 'erstklassig', 'fair', 'falsch', 'fantastisch', 'faszinierend', 'faul', 'feig', 'fein', 'fest', 'fett', 'fit', 'fleißig', 'fortgeschritten', 'frech', 'frei', 'freundlich', 'froh', 'fröhlich', 'fürsorglich', 'gastfreundlich', 'gebildet', 'geduldig', 'gefährlich', 'gefühlvoll', 'geistreich', 'geizig', 'gemein', 'gemütlich', 'genial', 'gerecht', 'geschätzt', 'gescheit', 'geschickt', 'geschlossen', 'geschwätzig', 'gesellig', 'gesund', 'gierig', 'glaubwürdig', 'glücklich', 'grob', 'grosszügig', 'groß', 'grüntig', 'gut', 'gut angezogen', 'gut gelaunt', 'gut informiert', 'halb', 'halsstarrig', 'hart', 'hartnäckig', 'hässlich', 'heiß', 'heiter', 'hell', 'hemmungslos', 'herrlich', 'herzlos', 'hilfreich', 'hinterlistig', 'hoch', 'hochmütig', 'hochnäsig', 'höflich', 'hübsch', 'hungrig', 'idealistisch', 'intelligent', 'interessant', 'intolerant', 'jung', 'kalt', 'kindisch', 'klasse', 'toll', 'super', 'klein', 'kleinlich', 'klug', 'komisch', 'kompliziert', 'konsequent', 'konservativ', 'kontaktfreudig', 'kräftig', 'krank', 'kreativ', 'kritisch', 'krumm', 'labil', 'lang', 'langsam', 'langweilig', 'lässig', 'launisch', 'laut', 'lebendig', 'leicht', 'leichtsinnig', 'leidenschaftlich', 'leise', 'liberal', 'lieb', 'liebenswürdig', 'lustig', 'melancholisch', 
'merkwürdig', 'miserabel', 'misstrauisch', 'modern', 'modisch', 'mollig', 'moralisch', 'munter', 'musikalisch', 'mutig', 'nachlässig', 'nah', 'naß', 'neidisch', 'nervös', 'nett', 'neu', 'neugierig', 'niedergeschlagen', 'niedlich', 'niedrig', 'normal', 'oberflächlich', 'offen', 'optimistisch', 'ordentlich', 'passiv', 'parteiisch', 'peinlich', 'pessimistisch', 'praktisch', 'pünktlich', 'radikal', 'raffiniert', 'rauh', 'rebellisch', 'recht', 'rechthaberisch', 'redlich', 'reich', 'reif', 'religiös', 'richtig', 'riesig', 'romantisch', 'rücksichtslos', 'rücksichtsvoll', 'ruhig', 'sauber', 'sauer', 'schick', 'schlampig', 'schlau', 'schlecht', 'schlimm', 'schmutzig', 'schnell', 'schön', 'schüchtern', 'schwach', 'schwer', 'schwierig', 'schwerfällig', 'schwermütig', 'selbstlos', 'selbstsicher', 'selbstsüchtig', 'seltsam', 'sensibel', 'sicher', '"sorgenlos', 'sorgfältig', 'spät', 'spontan', 'sportlich', 'spöttisch', 'stark', 'stolz (auf)', 'streitsüchtig', 'süß', 'sympathisch', 'taktlos', 'taktvoll', 'temperamentvoll', 'teuer', 'tot', 'traurig', 'treu', 'typisch', 'übergeschnappt', 'umweltbewusst', 'unabhängig', 'unbeholfen', 'unbekümmert', 'unberechenbar', 'unbeugsam', 'unerfarhren', 'ungehorsam', 'ungeschickt', 'unhöflich', 'unwiderstehlich', 'verantwortlich', 'verbissen', 'verdrießlich', 'verlässlich', 'verlegen', 'vernünftig', 'verrückt', 'vertrauenswürdig', 'verwirrt', 'verwöhnt', 'verzweifelt', 'vorsichtig', 'wahnsinnig', 'warm', 'warmherzig', 'wichtig', 'widerlich', 'wild', 'winzig', 'witzig', 'wunderbar', 'wunderschön', 'zerstreut', 'zufällig', 'zufrieden', 'zusammen', 'zuverlässig']
    #keywords += ['ehrgeizig', 'Amerikaner', 'ärgerlich', 'schlecht', 'schön', 'groß', 'blondine', 'langweilig', 'tapfer', 'unbesonnen', 'vorsichtig', 'bestimmt', 'charmant', 'fröhlich', 'Chinesisch', 'eingebildet', 'herkömmlich', 'feigling', 'Nüsse', 'grausam', 'schwierig', 'unangenehm', 'langweilig', 'leicht', 'Englisch', 'unecht', 'Fett', 'ein wenig', 'Französisch', 'häufig', 'freundlich', 'amüsant', 'komisch', 'General', 'großzügig', 'Deutsch', 'gut', 'hübsch', 'fleißig', 'hoch', 'ehrlich', 'intelligent', 'interessant', 'Art', 'entspannend', 'faul', 'klein', 'kurz', 'niedrig', 'bescheiden', 'launisch', 'naiv', 'engstirnig', 'neu', 'nett', 'alt', 'vollkommen', 'Persönlicher', 'fromm', 'höflich', 'schlecht', 'möglich', 'ziemlich', 'stolz', 'schnell', 'realistisch', 'neu', 'zuverlässig', 'reich', 'jämmerlich', 'egoistisch', 'empfindlich', 'schüchtern', 'stumm', 'dünn', 'schlank', 'langsam', 'klein', 'Spanisch', 'streng', 'stark', 'störrisch', 'gesprächig', 'vertrauenswürdig', 'hässlich', 'verschieden', 'schwach', 'unheimlich', 'weiß', 'jung']
    # Target tweet language for the search; alternatives kept for reference.
    lang = 'de'
    #lang = 'en'
    #lang = 'pt'

    # Log the wall-clock time this collection run started.
    # NOTE(review): the local name `time` shadows any imported `time` module.
    time = datetime.now().time()
    print(time)

    error = []  # error accumulator (not used in the visible portion of this function)
    x = 0       # counter (not used in the visible portion of this function)

    try:
        tso = TwitterSearchOrder() # create a TwitterSearchOrder object
        tso.set_language(lang) # we want to see German tweets only
        tso.set_include_entities(False) # and don't give us all those entity information

        # it's about time to create a TwitterSearch object with our secret tokens
        # SECURITY NOTE(review): API credentials are hard-coded below (and in the
        # three clients that follow). They are exposed in source control and
        # should be revoked and loaded from a config file or environment
        # variables instead.
        ts1 = TwitterSearch(
            consumer_key = 'kTs2YF5jFUZwRwnfprYMmtHc4',
            consumer_secret = 'ivn8IIaf9EByQgZ4wgvAU2nERl4J3uiuqziRWTg71ZnwOqKt3S',
            access_token = '1323543967-Ws0vHHW5CC2MlGHYDvY8oFsrLEisLucl4kJ8PlO',
            access_token_secret = 'WEQZDmz50jFmg81fjUcRQWUyPOCIUcxiWGez2UF1skk45',
         )


        ts2 = TwitterSearch(
            consumer_key = 'KTlBBJPbcphOToiXMvN0ZWAkO',
            consumer_secret = 'h5tOgnm8dt9c1lU21GdsSKPGNuBw1Zhhmlluo6bPiAucfDW0Fk',
            access_token = '1323543967-mmSfNBFyMtvLArmsaTdwiWsfunq4OHCwWUc472y',
            access_token_secret = 'jxEtyXlf5Gmh6S6awRgQ97TXsQxt5NUpxS4r8Ips1I4FH',
         )

        ts3 = TwitterSearch(
            consumer_key = 'edGj6kTiDU1NOtdSyHHJ6yoD4',
            consumer_secret = 'sGZmEV7xq4yx2ixwbhuBq6v1aCgRwuBF3IqIqhfYNLhWMokOML',
            access_token = '1323543967-JcSeOiXMxND46nMj3mkx43J8SB4A3GBR24Pkfx2',
            access_token_secret = 'zr19f231ZuV8D8gpo8asjnfEXI4gX0xWQ6AXk0kyMvimu',
         )


        ts4 = TwitterSearch(
            consumer_key = 'j8l5JUzvtn9FB8SBuOdtKsifE',
            consumer_secret = 'nKAEJnzgNn8G85bisr3wosZNKz61GnYZ3Nj2h8f6nLUP95K2L6',
            access_token = '1323543967-8EX0CGfWERxnmAbh3efPPaqpM0Dz1r6ZGaHTpRY',
            access_token_secret = '2Iga4V9dyvbR8AB3UmXd3NAqBinbJcMlu8PzNQhmoKS8O',
         )

        # Pool of authenticated clients — presumably rotated to spread requests
        # across four sets of rate limits; confirm at the (not visible) call site.
        tss = [ts1, ts2, ts3, ts4]


    except TwitterSearchException as e: # take care of all those ugly errors if there are some
        print(e)
        return
    # Look up the tweet ID of a previously stored document so polling can
    # resume from it (used as since_id below).
    # NOTE(review): .sort("startedAtTime") sorts ascending by default, which
    # would fetch the OLDEST document even though the log message says latest —
    # confirm the intended sort direction. Also note this fragment uses
    # Python 2 `print` statements and is not Python 3 compatible.
    latest_id = tweets.find( {}, { 'object.tweet_id':1 } ).sort("startedAtTime").limit(1)
    latest_id_str = latest_id[db_count-1]['object']['tweet_id']
    latest_id_int = int(latest_id_str)
    print 'Count of documents in the ' + keyword + ' collection is not 0. It is ' + str(db_count) + '. Mongo is now identifying the latest tweet ID to append as a parameter to the API call.'
# If there are no documents in the collection, no queries are done, and the since_id is left out of the API call.
else:
    print 'The Mongo collection ' + keyword + ' is empty. The script will now collect all tweets.'
    
# create a TwitterSearchOrder object
tso = TwitterSearchOrder() 

# let's define all words we would like to have a look for
tso.set_keywords([keyword])

# Select language
tso.set_language('en') 

# Include Entity information
tso.set_include_entities(True)

# NOTE(review): `is not` compares object identity, not value. It happens to
# behave like `!= 0` for small ints under CPython's integer caching, but the
# correct comparison here is `if db_count != 0:`.
if db_count is not 0:
    tso.set_since_id(latest_id_int)
    print 'Since the document count in the ' + keyword + ' collection is above 0, the since_id uses the parameter of the latest tweet so that only new tweets are collected.'
else:
	print 'No documents exist in the ' + keyword + ' collection right now so the since_id parameter will be empty and all tweets will be collected.'

    
# Create a TwitterSearch object with our secret tokens
ts = TwitterSearch(
    consumer_key = config.get('Twitter', 'consumer_key'),
    consumer_secret = config.get('Twitter', 'consumer_secret'),
def Tweets():
    """Collect recent tweets for each company listed in the module-level
    ``MainDF`` DataFrame.

    For every row of ``MainDF`` this runs two queries: a hashtag search
    (original tweets only, retweets skipped) and the company account's own
    timeline (English tweets only, capped at ``max_feeds``). Results are
    stored as a DataFrame in the module-level ``tweets_datasets`` dict keyed
    by company name, and each company with results is appended to the
    module-level ``tw_current_companies`` list.

    Relies on module-level names: ``MainDF``, ``new_date``, ``Find``,
    ``tweets_datasets``, ``tw_current_companies``, ``pd`` and ``parse``.
    Returns nothing; TwitterSearch errors are printed and swallowed.
    """

    try:

        max_feeds = 10  # cap on timeline tweets collected per company
        tso = TwitterSearchOrder()  # create a TwitterSearchOrder object
        tso.set_language('en')
        tso.set_include_entities(
            False)  # and don't give us all those entity information
        tso.set_until(new_date)  # only tweets created before this module-level date
        # Request non-truncated tweets so 'full_text' is populated.
        tso.arguments.update({'tweet_mode': 'extended'})
        # NOTE(review): 'truncated' is not a documented search parameter and is
        # presumably ignored by the API — confirm and remove if so.
        tso.arguments.update({'truncated': 'False'})

        # SECURITY NOTE(review): hard-coded API credentials — exposed in source
        # control; revoke them and load from config/environment instead.
        ts = TwitterSearch(
            consumer_key='DMHjSht5U0UqNUsAWpZH9DXok',
            consumer_secret=
            'olCjsx8LltiHxEiPHWafExoibDuu4eZT48udXTeSYcQbLQ3juB',
            access_token='1170976252213125121-ftEg9MzF9siFHUmcUkV6zzT7mQV9Db',
            access_token_secret='eNA62T8Ig40Iz1wmKf6baDGHqY3Wh9kxzu9oaOQdGE9h8',
        )

        for c in range(len(MainDF)):
            count = 0  # timeline tweets accepted so far for this company

            #kw=[MainDF['twitter'][c]]
            #for h in MainDF['hashtag'][c]:
            #    kw.append(h)

            # Reuse the same search order, swapping in this company's hashtags.
            tso.set_keywords(MainDF['hashtag'][c])
            tweets_list = []

            # Timeline query for the company's own account.
            tuo = TwitterUserOrder(MainDF['twitter'][c])
            #            tuo.set_language('en')
            tuo.set_include_entities(
                False)  # and don't give us all those entity information
            #            tuo.set_until(days_ago)
            #            tuo.set_count(15)
            tuo.arguments.update({'tweet_mode': 'extended'})
            tuo.arguments.update({'truncated': 'False'})

            #for tweet in ts.search_tweets_iterable(tso):
            #    print(tweet)
            #    tweets_list.append([tweet['user']['screen_name'],tweet['full_text']])

            # Hashtag search: keep original tweets only.
            for tweet in ts.search_tweets_iterable(tso):
                if 'retweeted_status' in tweet:
                    None  # no-op: retweets are intentionally ignored
                    #tweets_list.append([tweet['user']['screen_name'],tweet['retweeted_status']['full_text'],'Retweet of ' + tweet['retweeted_status']['user']['screen_name']])
                else:
                    links = Find(tweet['full_text'])  # extract URLs from the tweet text
                    links = ', '.join(link for link in links)
                    #print(tweet)
                    tweets_list.append([
                        MainDF['company'][c], tweet['user']['screen_name'],
                        tweet['full_text'], tweet['created_at'], links
                    ])

            # Account timeline: keep English tweets only, up to max_feeds.
            for tweet in ts.search_tweets_iterable(tuo):
                if tweet['lang'] != 'en':
                    #print(tweet)
                    None  # no-op: non-English tweets are skipped
                else:

                    # print(tweet)
                    links = Find(tweet['full_text'])
                    links = ', '.join(link for link in links)

                    tweets_list.append([
                        MainDF['company'][c], tweet['user']['screen_name'],
                        tweet['full_text'], tweet['created_at'], links
                    ])
                    count = count + 1

                    if count == max_feeds:
                        break

            # Only record companies for which anything was collected.
            if tweets_list != []:
                tweets_datasets[MainDF['company'][c]] = pd.DataFrame(
                    tweets_list)
                tweets_datasets[MainDF['company'][c]].columns = [
                    'Company', 'Source/User', 'Title/Tweet', 'Date', 'Link'
                ]
                tweets_datasets[MainDF['company'][c]].insert(
                    0, 'Category', 'Twitter')

                # Normalize the 'Date' column from Twitter's timestamp string
                # to datetime.date objects.
                # NOTE(review): chained indexing (df['Date'][i] = ...) can
                # trigger pandas SettingWithCopyWarning; .loc[i, 'Date'] would
                # be the safe form — confirm before changing.
                for i in range(
                        len(tweets_datasets[MainDF['company'][c]]['Date'])):

                    tweets_datasets[MainDF['company'][c]]['Date'][i] = parse(
                        tweets_datasets[MainDF['company'][c]]['Date'][i])
                    tweets_datasets[
                        MainDF['company'][c]]['Date'][i] = tweets_datasets[
                            MainDF['company'][c]]['Date'][i].date()

                    #print(datasets[companies_names[count]])

                tw_current_companies.append(MainDF['company'][c])

            else:
                None  # no-op: nothing collected for this company

            #tweets_list.append()
            #print( '@%s tweeted: %s' % ( tweet['user']['screen_name'], tweet['text'] ) )

    except TwitterSearchException as e:  # take care of all those ugly errors if there are some
        print(e)