Code Example #1
File: twitter_demo.py  Project: CaptainAL/Spyder
def expand_tweetids_demo():
    """
    Given a file object containing a list of Tweet IDs, fetch the
    corresponding full Tweets.

    """
    ids_f =\
        StringIO("""\
        588665495492124672
        588665495487909888
        588665495508766721
        588665495513006080
        588665495517200384
        588665495487811584
        588665495525588992
        588665495487844352
        588665495492014081
        588665495512948737""")
    oauth = credsfromfile()
    client = Query(**oauth)
    hydrated = client.expand_tweetids(ids_f)

    for tweet in hydrated:
        try:
            id_str = tweet['id_str']
            print('id: {}\ntext: {}\n'.format(id_str, tweet['text']))
        except KeyError:
            # Tweets that could not be hydrated lack these fields
            pass
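Note on credentials (applies to this and the later examples): credsfromfile() reads OAuth keys from a credentials.txt file in the directory named by the TWITTER environment variable, per the NLTK Twitter HOWTO. A minimal setup, with placeholder values, looks roughly like this:

export TWITTER="/path/to/twitter-files"        # shell: directory containing credentials.txt

app_key=YOUR_CONSUMER_KEY                      # credentials.txt contents (placeholders)
app_secret=YOUR_CONSUMER_SECRET
oauth_token=YOUR_ACCESS_TOKEN
oauth_token_secret=YOUR_ACCESS_TOKEN_SECRET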
Code Example #2
File: twitter_demo.py  Project: vishalbelsare/nltk
def expand_tweetids_demo():
    """
    Given a file object containing a list of Tweet IDs, fetch the
    corresponding full Tweets, if available.

    """
    ids_f = StringIO("""\
        588665495492124672
        588665495487909888
        588665495508766721
        588665495513006080
        588665495517200384
        588665495487811584
        588665495525588992
        588665495487844352
        588665495492014081
        588665495512948737""")
    oauth = credsfromfile()
    client = Query(**oauth)
    hydrated = client.expand_tweetids(ids_f)

    for tweet in hydrated:
        id_str = tweet["id_str"]
        print(f"id: {id_str}")
        text = tweet["text"]
        if text.startswith("@null"):
            text = "[Tweet not available]"
        print(text + "\n")
Code Example #3
def collect_tweets(my_keyword, json_writer, stop_num):
    my_keyword = my_keyword.strip()
    print('finding tweets with {} keyword'.format(my_keyword))
    oauth = credsfromfile()
    client = Query(**oauth)
    tweets = client.search_tweets(keywords=my_keyword, limit=stop_num)
    dump_tweets(tweets, json_writer)
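The dump_tweets() helper is not shown in this snippet. A minimal stand-in (a sketch only, not the project's actual implementation), assuming json_writer is an already-open writable text file, could write one JSON object per line:

import json

def dump_tweets(tweets, json_writer):
    # Write each fetched tweet dict as a single JSON line (hypothetical stand-in).
    for tweet in tweets:
        json_writer.write(json.dumps(tweet) + '\n')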
Code Example #4
File: twitter_demo.py  Project: prz3m/kind2anki
def expand_tweetids_demo():
    """
    Given a file object containing a list of Tweet IDs, fetch the
    corresponding full Tweets, if available.

    """
    ids_f = StringIO(
        """\
        588665495492124672
        588665495487909888
        588665495508766721
        588665495513006080
        588665495517200384
        588665495487811584
        588665495525588992
        588665495487844352
        588665495492014081
        588665495512948737"""
    )
    oauth = credsfromfile()
    client = Query(**oauth)
    hydrated = client.expand_tweetids(ids_f)

    for tweet in hydrated:
        id_str = tweet['id_str']
        print('id: {}'.format(id_str))
        text = tweet['text']
        if text.startswith('@null'):
            text = "[Tweet not available]"
        print(text + '\n')
Code Example #5
def scrape_twitter(google_client):
    tw = Twitter()
    # tweets = tw.tweets(keywords='JetBlue', stream=False, limit=10) #sample from the public stream
    # print(tweets)
    oauth = credsfromfile()
    client = Query(**oauth)
    tweets = client.search_tweets(
        keywords='JetBlue OR #JetBlue -filter:retweets', limit=10000)

    topics_dict = { "tweet_texts":[], \
                    "ent_score":[], \
                    "ent_magn":[], \
                    "overall_score":[], \
                    "overall_magn":[]}

    for tweet in tqdm(tweets):
        topics_dict["tweet_texts"].append(tweet['text'])
        ent_score, ent_magnitude, doc_score, doc_magnitude = analyze_text(
            google_client, text=tweet['text'])
        topics_dict["ent_score"].append(ent_score)
        topics_dict["ent_magn"].append(ent_magnitude)
        topics_dict["overall_score"].append(doc_score)
        topics_dict["overall_magn"].append(doc_magnitude)
        # pprint(tweet, depth=1)
        # print('\n\n')

    print('Total Count:', len(topics_dict["tweet_texts"]))
    metrics = ["ent_score", "ent_magn", "overall_score", "overall_magn"]
    for metric in metrics:
        metric_score = np.asarray(topics_dict[metric])
        print(metric, "Mean:", np.mean(metric_score), "St Dev:",
              np.std(metric_score))

    with open('./csvs/twitter-jetblue-sentiment.json', 'w') as fp:
        json.dump(topics_dict, fp)
Code Example #6
File: twitter_demo.py  Project: MorLong/nltk
def expand_tweetids_demo():
    """
    Given a file object containing a list of Tweet IDs, fetch the
    corresponding full Tweets.
    
    """
    ids_f =\
        io.StringIO("""\
        588665495492124672
        588665495487909888
        588665495508766721
        588665495513006080
        588665495517200384
        588665495487811584
        588665495525588992
        588665495487844352
        588665495492014081
        588665495512948737""")
    oauth = credsfromfile()
    client = Query(**oauth)
    hydrated = client.expand_tweetids(ids_f)

    for tweet in hydrated:
        try:
            id_str = tweet['id_str']
            print('id: {}\ntext: {}\n'.format(id_str, tweet['text']))
        except KeyError:
            # Tweets that could not be hydrated lack these fields
            pass
Code Example #7
File: twitter_demo.py  Project: prz3m/kind2anki
def search_demo(keywords='nltk'):
    """
    Use the REST API to search for past tweets containing a given keyword.
    """
    oauth = credsfromfile()
    client = Query(**oauth)
    for tweet in client.search_tweets(keywords=keywords, limit=10):
        print(tweet['text'])
Code Example #8
File: twitter_demo.py  Project: prz3m/kind2anki
def tweets_by_user_demo(user='******', count=200):
    """
    Use the REST API to search for past tweets by a given user.
    """
    oauth = credsfromfile()
    client = Query(**oauth)
    client.register(TweetWriter())
    client.user_tweets(user, count)
Code Example #9
File: twitterapi.py  Project: GuilhermeFerreira08/Git
def get_tweet_by_id(self, filepath, tw_id):
    ids = str(tw_id)
    ids_f = StringIO(ids)
    client = Query(**oauth)  # `oauth` is assumed to be defined at module level
    hydrated = client.expand_tweetids(ids_f)
    tw = read_csv_tweets(filepath)
    for i in hydrated:
        yield tw.loc[tw['user.id'] == i]['text']
Code Example #10
File: twitter_demo.py  Project: vishalbelsare/nltk
def search_demo(keywords="nltk"):
    """
    Use the REST API to search for past tweets containing a given keyword.
    """
    oauth = credsfromfile()
    client = Query(**oauth)
    for tweet in client.search_tweets(keywords=keywords, limit=10):
        print(tweet["text"])
Code Example #11
File: twitter_demo.py  Project: vishalbelsare/nltk
def lookup_by_userid_demo():
    """
    Use the REST API to convert a userID to a screen name.
    """
    oauth = credsfromfile()
    client = Query(**oauth)
    user_info = client.user_info_from_id(USERIDS)
    for info in user_info:
        name = info["screen_name"]
        followers = info["followers_count"]
        following = info["friends_count"]
        print(f"{name}, followers: {followers}, following: {following}")
Code Example #12
File: twitter_demo.py  Project: prz3m/kind2anki
def lookup_by_userid_demo():
    """
    Use the REST API to convert a userID to a screen name.
    """
    oauth = credsfromfile()
    client = Query(**oauth)
    user_info = client.user_info_from_id(USERIDS)
    for info in user_info:
        name = info['screen_name']
        followers = info['followers_count']
        following = info['friends_count']
        print("{0}, followers: {1}, following: {2}".format(name, followers, following))
Code Example #13
def lookup_by_userid_demo():
    """
    Use the REST API to convert a userID to a screen name.
    """
    oauth = credsfromfile()
    client = Query(**oauth)
    user_info = client.user_info_from_id(USERIDS)
    for info in user_info:
        name = info['screen_name']
        followers = info['followers_count']
        following = info['friends_count']
        print("{0}, followers: {1}, following: {2}".format(name, followers, following))
Code Example #14
    def obtener_Twits(listaPalabras, DicPalabras):
        listaPalabrasConsulta = []
        # This could be improved
        # size = len(listaPalabras) / 2
        for x in list(DicPalabras)[0:4]:
            listaPalabrasConsulta.append(x)
        print("Lista de palabras para la consulta: ", listaPalabrasConsulta)

        # Query Twitter: joins the most important words with AND (a space is a logical AND, a comma is a logical OR)
        txt = ' '.join(listaPalabrasConsulta)
        oauth = credsfromfile()
        client = Query(**oauth)
        tweets = client.search_tweets(keywords=txt, limit=10)

        arrTweets = []
        for tweet in tweets:
            arrTweets.append(Standardizer.standardize(tweet['text']))
        return arrTweets
Code Example #15
File: twitter_demo.py  Project: prz3m/kind2anki
def limit_by_time_demo(keywords="nltk"):
    """
    Query the REST API for Tweets about NLTK since yesterday and send
    the output to terminal.

    This example makes the assumption that there are sufficient Tweets since
    yesterday for the date to be an effective cut-off.
    """
    date = yesterday()
    dt_date = datetime.datetime(*date)
    oauth = credsfromfile()
    client = Query(**oauth)
    client.register(TweetViewer(limit=100, lower_date_limit=date))

    print("Cutoff date: {}\n".format(dt_date))

    for tweet in client.search_tweets(keywords=keywords):
        print("{} ".format(tweet['created_at']), end='')
        client.handler.handle(tweet)
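The yesterday() helper comes from elsewhere in twitter_demo.py. Because its result is both unpacked into datetime.datetime(*date) and passed as lower_date_limit, a plausible stand-in (a sketch under that assumption, not the original helper) returns yesterday's date as a tuple of integers:

import datetime

def yesterday():
    # Hypothetical stand-in: yesterday at the current time, as a
    # (year, month, day, hour, minute) tuple usable with datetime.datetime(*date).
    date = datetime.datetime.now() - datetime.timedelta(days=1)
    return date.year, date.month, date.day, date.hour, date.minute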
Code Example #16
File: twitter.py  Project: WimFlorijn/NLP
    def add_tweets(self, user, party):
        """
        Downloads recent tweets from a single Twitter user.

        :param user: the Twitter handle of the user.
        :param party: the political party to which `user` belongs.
        :return: the list of downloaded tweets.
        """

        query = Query(**self.oauth)
        tweets = query.get_user_timeline(
            screen_name=user,
            count=200,
            exclude_replies='false',
            include_rts='true')
        self.tweets[user] = tweets
        self.save()
        self.users[user] = party
        return tweets
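get_user_timeline() is not defined in this class; it is available because NLTK's Query extends the underlying Twython client, so Twython's REST methods can be called directly. A standalone sketch of the same call, using a hypothetical screen name and the credential loading shown in the other examples:

from nltk.twitter import Query, credsfromfile

oauth = credsfromfile()
query = Query(**oauth)
# 'nltk_org' is a hypothetical handle used only for illustration
tweets = query.get_user_timeline(screen_name='nltk_org', count=200,
                                 exclude_replies='false', include_rts='true')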
Code Example #17
File: twitterapi.py  Project: GuilhermeFerreira08/Git
def get_users(self, *args):  # look up users by user ID
    client = Query(**oauth)  # `oauth` is assumed to be defined at module level
    user_info = client.user_info_from_id(*args)
    users = []
    for user in user_info:
        name = user['name']
        followers = user['followers_count']
        following = user['friends_count']
        users.append(user)
        print(f'{name} {followers} {following}\n')
    return users
Code Example #18
File: twitter_demo.py  Project: vishalbelsare/nltk
def limit_by_time_demo(keywords="nltk"):
    """
    Query the REST API for Tweets about NLTK since yesterday and send
    the output to terminal.

    This example makes the assumption that there are sufficient Tweets since
    yesterday for the date to be an effective cut-off.
    """
    date = yesterday()
    dt_date = datetime.datetime(*date)
    oauth = credsfromfile()
    client = Query(**oauth)
    client.register(TweetViewer(limit=100, lower_date_limit=date))

    print(f"Cutoff date: {dt_date}\n")

    for tweet in client.search_tweets(keywords=keywords):
        print("{} ".format(tweet["created_at"]), end="")
        client.handler.handle(tweet)
Code Example #19
def getOpinionsOfTopic(topic, oauth, num_of_tweets):
    raw_tweets = []
    client = Query(**oauth)
    tweets = client.search_tweets(keywords=topic, limit=num_of_tweets)
    for tweet in tweets:
        raw_tweets.append(tweet)
    tweets, retweet_counts, fave_counts, followers_count = preprocess_tweet(raw_tweets)
    sentiments, totals = getOpinionTotals(tweets, retweet_counts, fave_counts, followers_count)

    adjustedTotal = totals['Positive'] + totals['Negative'] + totals['Neutral']
    posPercent = totals['Positive'] / adjustedTotal
    negPercent = totals['Negative'] / adjustedTotal
    neuPercent = totals['Neutral'] / adjustedTotal
    print("Opinions for the topic \"{}\":\nPositive: {:.0%}, Negative: {:.0%}, Neutral: {:.0%} out of {} tweets.\n"
                  .format(topic, posPercent, negPercent, neuPercent, num_of_tweets))

    greatestTotal = float(max(totals.values()))
    opinion = ""
    for key in totals.keys():
        if totals[key] == greatestTotal:
            opinion = key.lower()
    if opinion != 'Neutral'.lower():
        print("The topic was mostly {}. Finding the most {} tweet.".format(opinion, opinion))
    else:
        print("The topic was mostly neutral. Unable to find the most neutral tweet.")

    sent = {'pos' : 0, 'neg' : 0,'neu' : 0, 'compound' : 0}
    sentTweet = ""
    for i in range(len(tweets)):
        if opinion == 'Positive'.lower():
            if (sentiments[i]['compound'] >= sent['compound'] and sentiments[i]['pos'] > sent['pos']):
                sent = sentiments[i]
                sentTweet = raw_tweets[i]
        elif opinion == 'Negative'.lower():
            if (sentiments[i]['compound'] <= sent['compound'] and sentiments[i]['neg'] > sent['neg']):
                sent = sentiments[i]
                sentTweet = raw_tweets[i]

    if opinion != 'Neutral'.lower():
        print("Most {} tweet: {}".format(opinion, sentTweet['text']))
        print("URL: https://twitter.com/statuses/{}".format(sentTweet['id']))
    print("------------------------------------")
Code Example #20
File: twitter_demo.py  Project: vishalbelsare/nltk
def tweets_by_user_demo(user="******", count=200):
    """
    Use the REST API to search for past tweets by a given user.
    """
    oauth = credsfromfile()
    client = Query(**oauth)
    client.register(TweetWriter())
    client.user_tweets(user, count)
Code Example #21
def search():
    oauth = credsfromfile()
    client = Query(**oauth)
    df = pd.read_csv('twitter_users.csv')
    df = df[df['Flag'] == 'Use']

    terms = set([
        '@' + u.replace('https://twitter.com/', '') for u in df['URL'].values
    ])

    with open('terms.pkl', 'rb') as f:
        terms = terms.union(pickle.load(f))

    searches = 0

    li_html = '<li>name={0} created={1} favorited={2} retweeted={3} \
        {4} query={5}</li>'

    for term in terms:
        searches += 1
        row = twitter_searches.find_one(query=term)

        if row is not None:
            if hours_from_now(row['search_date']) < 24:
                continue

        tweets = client.search_tweets(keywords=term + ' python http -RT',
                                      lang='en',
                                      limit=5)

        for t in tweets:
            if int(t['favorite_count']) == 0:
                log.debug('No favorites')
                continue

            text = t['text']
            dt = datetime.strptime(t['created_at'], '%a %b %d %H:%M:%S %z %Y')

            if hours_from_now(dt) > 24:
                continue

            if core.not_english(text):
                log.debug('Not english: {}'.format(text))
                continue

            log.debug('Searching for {}'.format(term))
            uname = t['user']['screen_name']
            uname_html = '<a href="https://twitter.com/{0}">{0}</a>'
            users = [
                v.replace('https://twitter.com/', '')
                for v in pd.read_csv('twitter_users.csv')['URL'].values
            ]

            with open('twitter_users.csv', 'a') as users_csv:

                if uname not in set(users):
                    users_csv.write('{0},{1},Recommended\n'.format(
                        datetime.now(), 'https://twitter.com/' + uname))

            html = li_html.format(uname_html.format(uname), t['created_at'],
                                  t['favorite_count'], t['retweet_count'],
                                  hrefs_from_text(text), term)

            twitter_searches.upsert(
                dict(query=term, search_date=datetime.now(), html=html),
                ['query', 'html'])
        if searches == 150:
            break
Code Example #22
# 		u'protected': False,
# 		u'default_profile': False,
# 		u'is_translator': False
# 	},
# 	u'geo': None,
# 	u'in_reply_to_user_id_str': None,
# 	u'lang': u'en',
# 	u'created_at': u'Sat Apr 23 19:57:28 +0000 2016',
# 	u'in_reply_to_status_id_str': None,
# 	u'place': None,
# 	u'metadata': {u'iso_language_code': u'en', u'result_type': u'recent'}
# }

if __name__ == '__main__':
    oauth = credsfromfile()
    client = Query(**oauth)
    with open(searchfile, 'r') as f_search:

        search_terms = [
            term.strip() for term in f_search.readlines() if term.strip()
        ]

        # Get tweets for specific search terms
        for term in search_terms:
            print "Collecting {term}".format(term=term)
            search_data = []

            tweets = client.search_tweets(
                keywords="{term} -filter:retweets".format(term=term),
                limit=float('inf'))
            while True:
Code Example #23
File: twitter.py  Project: ivanidris/SoNaR
def search():
    oauth = credsfromfile()
    client = Query(**oauth)
    df = pd.read_sql('SELECT URL FROM twitter_users',
                     db.executable.raw_connection())

    users = set([u.replace('https://twitter.com/', '')
                 for u in df['URL'].values])
    terms = set(['@' + u for u in users])

    with open('terms.pkl', 'rb') as f:
        terms = terms.union(pickle.load(f))

    searches = 0

    li_html = 'name={0} created={1} favorited={2} retweeted={3} \
        {4} query={5}'

    for term in terms:
        searches += 1
        row = twitter_searches.find_one(query=term)

        if row is not None:
            if hours_from_now(row['search_date']) < 24:
                continue

        tweets = client.search_tweets(keywords=term + ' python http -RT',
                                      lang='en')

        for t in tweets:
            if int(t['favorite_count']) == 0:
                log.debug('No favorites')
                continue

            text = t['text']
            dt = datetime.strptime(t['created_at'],
                                   '%a %b %d %H:%M:%S %z %Y')

            if hours_from_now(dt) > 24:
                continue

            if core.not_english(text):
                log.debug('Not english: {}'.format(text))
                continue

            log.debug('Searching for {}'.format(term))
            uname = t['user']['screen_name']
            uname_html = '<a href="https://twitter.com/{0}">{0}</a>'

            if uname not in set(users):
                db['twitter_users'].insert(
                    dict(Flag='Recommended', Date=datetime.now(),
                         URL='https://twitter.com/' + uname))

            html = li_html.format(uname_html.format(uname), t['created_at'],
                                  t['favorite_count'], t['retweet_count'],
                                  hrefs_from_text(text), term)

            twitter_searches.upsert(dict(query=term,
                                         search_date=datetime.now(),
                                         html=html),
                                    ['query', 'html'])
        if searches == 150:
            break
Code Example #24
from nltk.twitter import Query, credsfromfile, TweetViewer
from nltk.stem import WordNetLemmatizer
from sklearn.feature_extraction.text import CountVectorizer
import sys

if (len(sys.argv)<4):
    print ('Usage:', sys.argv[0], ' twitter_username max_tweets_to_search max_top_words_to_print lemmatize(optional)' )
    quit()

#capture the output of tweetViewer to file for processing
sys.stdout = open('tweets.txt', 'w')

oauth = credsfromfile()
client = Query(**oauth)
client.register(TweetViewer(limit=int(sys.argv[2])))
client.user_tweets(sys.argv[1], int(sys.argv[2]))


#give back control to stdout
sys.stdout = sys.__stdout__
lemmatizer = WordNetLemmatizer()

if (len(sys.argv)>4 and sys.argv[4].lower()=='lemmatize'):
    lemmatize=True
else:
    lemmatize=False


def text_cleaner(documents):
    text_cleaned = []
    for document in documents:
Code Example #25
File: twitter.py  Project: zshwuhan/SoNaR
def search():
    oauth = credsfromfile()
    client = Query(**oauth)
    df = pd.read_csv('twitter_users.csv')
    df = df[df['Flag'] == 'Use']

    terms = set(['@' + u.replace('https://twitter.com/', '')
                 for u in df['URL'].values])

    with open('terms.pkl', 'rb') as f:
        terms = terms.union(pickle.load(f))

    searches = 0

    li_html = '<li>name={0} created={1} favorited={2} retweeted={3} \
        {4} query={5}</li>'

    for term in terms:
        searches += 1
        row = twitter_searches.find_one(query=term)

        if row is not None:
            if hours_from_now(row['search_date']) < 24:
                continue

        tweets = client.search_tweets(keywords=term + ' python http -RT',
                                      lang='en', limit=5)

        for t in tweets:
            if int(t['favorite_count']) == 0:
                log.debug('No favorites')
                continue

            text = t['text']
            dt = datetime.strptime(t['created_at'],
                                   '%a %b %d %H:%M:%S %z %Y')

            if hours_from_now(dt) > 24:
                continue

            if core.not_english(text):
                log.debug('Not english: {}'.format(text))
                continue

            log.debug('Searching for {}'.format(term))
            uname = t['user']['screen_name']
            uname_html = '<a href="https://twitter.com/{0}">{0}</a>'
            users = [v.replace('https://twitter.com/', '')
                     for v in pd.read_csv('twitter_users.csv')['URL'].values]

            with open('twitter_users.csv', 'a') as users_csv:

                if uname not in set(users):
                    users_csv.write('{0},{1},Recommended\n'.format(
                        datetime.now(), 'https://twitter.com/' + uname))

            html = li_html.format(uname_html.format(uname), t['created_at'],
                                  t['favorite_count'], t['retweet_count'],
                                  hrefs_from_text(text), term)

            twitter_searches.upsert(dict(query=term,
                                         search_date=datetime.now(),
                                         html=html),
                                    ['query', 'html'])
        if searches == 150:
            break
Code Example #26
File: twitter.py  Project: zozoh94/PolReview
    print('\n' + candidate['name'])
    print("\n" + subject + " :")
    print("Mots concernées : " + str(candidate['pro'] + candidate['cons']))
    print("Avis pour : " + str(candidate['pro']))
    print("Avis contre : " + str(candidate['cons']))
    print("Sans avis : " + str(nb_tweets -
                               (candidate['pro'] + candidate['cons'])))
    print("Indice pour : " + str(candidate['pro'] / nb_tweets))
    print("Indice contre : " + str(candidate['cons'] / nb_tweets))
    if (candidate['pro'] > candidate['cons']):
        print("Les gens sont pour ce candidat")
    elif (candidate['cons'] > candidate['pro']):
        print("Les gens sont contre ce candidat")
    else:
        print("Les gens sont partagés")
    print('\n\n')


if __name__ == '__main__':
    print("Loadind tweets & analyzing ...")
    subject = 'people_tweets'
    for candidate in candidates_tweets:
        tw = Twitter()
        oauth = credsfromfile()
        client = Query(**oauth)
        tweets = client.search_tweets(keywords=candidate['name'],
                                      limit=nb_tweets)
        analyze_tweets(candidate, tweets, subject)

        print_results(candidate, subject)
Code Example #27
File: twitterapi.py  Project: GuilhermeFerreira08/Git
def get_twiter(self, keywords):
    client = Query(**oauth)  # `oauth` and `limit` are assumed to be defined at module level
    tweets = client.search_tweets(keywords=keywords, limit=limit)
    tweet = next(tweets)
    return tweet
Code Example #28
File: main.py  Project: TobyVries/Text-Summarization
        lexrank.display_comparison()
        lsa.display_comparison()
        kl_sum.display_comparison()
        luhn.display_comparison()
        sumbasic.display_comparison()

        res = input("Press 'r' to restart\n")
        if res != 'r':
            restart = False

    elif choice == '2':  # summarize a twitter topic
        tweet_topic = input("Enter the topic you want a summary for\n")

        # Authenticate and retrieve tweets based on user entered topic
        oauth = credsfromfile()
        client = Query(**oauth)
        client.register(TweetWriter())
        tweets = client.search_tweets(keywords=tweet_topic,
                                      limit=100,
                                      lang='en')

        tweetSummarizer = TweetSummarizer()

        # clean tweets and store in tweets.csv
        rows = []
        usable_rows = []
        for tweet in tweets:
            rows.append(str(tweet['text']))
        if len(rows) > 0:
            usable_rows = rows.copy()
            for i in range(0, len(rows)):
Code Example #29
import random
import json
import re
import csv
from nltk.twitter import Twitter
from nltk.twitter import Query, Streamer, Twitter, TweetViewer, TweetWriter, credsfromfile
from nltk.sentiment.vader import SentimentIntensityAnalyzer

tw = Twitter()
sid = SentimentIntensityAnalyzer()

# Grab credentials from file
oauth = credsfromfile()

# Search API
client = Query(**oauth)
tweets = client.search_tweets(keywords='Korean, summit', limit=10000)
tweet = next(tweets)

# Open data file
outfile = open("korean_summit_auto.csv", "a")
writer = csv.writer(outfile)
mydata = [
    'DATE', 'TWEET', 'COMPOUND', 'NEGATIVE', 'NEUTRAL', 'POSITIVE', 'LATITUDE',
    'LONGITUDE'
]
# writer.writerow(mydata)


def pre_process_text(tweet):
    text = []
Code Example #30
# export TWITTER="twitter.txt"

from nltk.twitter import Twitter, Query, Streamer, credsfromfile
import pickle
from pprint import pprint

__author__ = 'kongaloosh'

import json
from pprint import pprint

with open('data/investments.json') as data_file:
# with open('data.json') as data_file:
    oauth = credsfromfile()
    data = json.load(data_file)
    tw = Twitter()
    client = Query(**oauth)

    for i in range(len(data['investments'])):
        if isinstance(data['investments'][i], dict):
            tweets = client.search_tweets(keywords=data['investments'][i]['name'], limit=100)
            tweets = list(tweets)
            data['investments'][i]['tweets'] = tweets

    with open('data_pickle.pkl', 'wb') as outfile:
        pickle.dump(data, outfile)

f = pickle.load(open('data_pickle.pkl', 'rb'))
print(f)
Code Example #31
from nltk.twitter import Query, credsfromfile
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.dates import DateFormatter
import datetime as dt
import os
import pickle
import sys
sys.path.append("../bhtsa")
from twitter_senti_analyzer import senti_score_time

# settings
oauth = credsfromfile()
client = Query(**oauth)
twtNum = 1000
startTime = [2016, 11, 8, 12, 0]
step = 30
step_num = 48

path = os.path.join(os.path.dirname(__file__), os.pardir, 'data', 'model')
f = open(os.path.join(path, 'NBClassifier.pickle'), 'rb')
NBC = pickle.load(f)

origin = dt.datetime(startTime[0], startTime[1], startTime[2], startTime[3],
                     startTime[4])
dates = []
for i in range(step_num):
    next_val = origin + dt.timedelta(minutes=step * i)
    dates.append(next_val)

hilary_score = senti_score_time('hilary clinton', client, NBC, twtNum,
Code Example #32
import json
import re
import csv
from nltk.twitter import Twitter
from nltk.twitter import Query, Streamer, Twitter, TweetViewer, TweetWriter, credsfromfile
from nltk.sentiment.vader import SentimentIntensityAnalyzer


tw = Twitter()
sid = SentimentIntensityAnalyzer()

# Grab credentials from file
oauth = credsfromfile()

# Search API
client = Query(**oauth)
tweets = client.search_tweets(keywords='Syria', limit=10000)
tweet = next(tweets)

# Open data file
outfile = open("syria_auto.csv","a")
writer = csv.writer(outfile)
mydata = ['DATE', 'TWEET', 'COMPOUND', 'NEGATIVE', 'NEUTRAL', 'POSITIVE','LATITUDE','LONGITUDE']
# writer.writerow(mydata)

def pre_process_text(tweet):
    text = []
    words_list = []
    clean_list = []

    # Get all tweet text in english
Code Example #33
from nltk.twitter import Query, credsfromfile, TweetViewer
import process_twt
from NBClassifier import NBClassifier
from SCClassifier import SCClassifier
from BGClassifier import BGClassifier
from nltk.corpus import twitter_samples, TwitterCorpusReader
import os
import pickle
import matplotlib.pyplot as plt
import numpy as np

# settings
oauth = credsfromfile()
client = Query(**oauth)
twtNum = 10
client.register(TweetViewer(limit=twtNum))
tweets_gen = client.search_tweets(keywords='hearthstone', lang='en')
tweets = []
slangdict = process_twt.get_slang_dict()
twt_list = []
for t in tweets_gen:
    twt_list.append(process_twt.preprocess(t['text'], slangdict=slangdict))
twt_list = list(set(twt_list))

for t in twt_list[:twtNum]:
    print(t)

fileIds = twitter_samples.fileids()
root = twitter_samples.root

# read tweet data from corpus
Code Example #34
from nltk.twitter import Query, Streamer, Twitter, TweetViewer, TweetWriter, credsfromfile
from wordcloud import WordCloud
import matplotlib.pyplot as plt
from nltk.sentiment.vader import SentimentIntensityAnalyzer
import seaborn as sns

#Rest API
from nltk.twitter import Twitter
tw = Twitter()
# tw.tweets(keywords='LokSabhaElection2019', limit=2)
tw.tweets(keywords='LokSabhaElection2019', stream=False, limit=20)

## Read tweets
totaltweets = 0
oauth = credsfromfile()
client = Query(**oauth)
f = open('E:/temp/twitter.txt', 'w')
tweets = client.search_tweets(keywords='LokSabhaElection2019', limit=10000)
for tweet in tweets:
    print(tweet['text'])
    try:
        f.write(tweet['text'])
        totaltweets += 1
    except Exception:
        pass
f.close()

f = open('E:/temp/twitter.txt', 'a')
oauth = credsfromfile()
client = Query(**oauth)
tweets = client.search_tweets(keywords='Elections2019', limit=10000)
Code Example #35

import random
import json
import re
import csv
from nltk.twitter import Twitter
from nltk.twitter import Query, Streamer, Twitter, TweetViewer, TweetWriter, credsfromfile
from nltk.sentiment.vader import SentimentIntensityAnalyzer

tw = Twitter()
sid = SentimentIntensityAnalyzer()

# Grab credentials from file
oauth = credsfromfile()

# Search API
client = Query(**oauth)
tweets = client.search_tweets(keywords='Bitcoin, #cryptocurrency', limit=10000)
tweet = next(tweets)

# Open data file
outfile = open("bitcoin_auto.csv", "a")
writer = csv.writer(outfile)
mydata = [
    'DATE', 'TWEET', 'COMPOUND', 'NEGATIVE', 'NEUTRAL', 'POSITIVE', 'LATITUDE',
    'LONGITUDE'
]
# writer.writerow(mydata)


def pre_process_text(tweet):
    text = []
Code Example #36
from nltk.twitter.common import json2csv
from nltk.twitter.common import json2csv_entities
from nltk.corpus import twitter_samples
from nltk.twitter import Query, Streamer, Twitter, TweetViewer, TweetWriter, credsfromfile
import pandas as pd

oauth = credsfromfile()
n = 10  # number of tweets to fetch
username = '******'

# Query
client = Query(**oauth)  # Search (historical) API
client.register(TweetWriter())  # write fetched tweets to a file
client.user_tweets(username, n)  # fetch the user's n most recent tweets

'''
Use json2csv to extract the text field from the fetched tweets.
The abspath passed as input_file must be adjusted to the path where the Query above wrote its data.
'''

input_file = twitter_samples.abspath('/Users/youngmihuang/twitter-files/tweets.20180726-155316.json')
with open(input_file) as fp:
    json2csv(fp, 'tweets_text.csv', ['text'])

# read
data = pd.read_csv('tweets_text.csv')
for line in data.text:
    print('Trump tweets content: ')
    print(line)

# tokenization
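json2csv() takes a list of top-level tweet fields to keep. A small variant of the call above (a sketch reusing the same input_file) that also keeps the tweet id and timestamp, both standard top-level fields in a tweet JSON object:

with open(input_file) as fp:
    # 'tweets_meta.csv' is a hypothetical output filename used only for illustration
    json2csv(fp, 'tweets_meta.csv', ['created_at', 'id', 'text'])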
Code Example #37
File: Twitter_NLP.py  Project: S-Black/Examples
def frequencyDistribution(data):
    return {i: data.count(i) for i in data}


#LIVE twitter feed
#------------------
#get 10 recent tweets mentioning the brand
tw = Twitter()
tw.tweets(keywords='nationalgriduk', stream=False, limit=10)

brand = 'nationalgriduk'

#API keys
#------------------------
oauth = credsfromfile()
client = Query(**oauth)
tweets = client.search_tweets(keywords=brand, limit=20000)
tweet = next(tweets)
pprint(tweet, depth=1)

#make sure tweets can be encoded
non_bmp_map = dict.fromkeys(range(0x10000, sys.maxunicode + 1), 0xfffd)
#print(x.translate(non_bmp_map))

# Sentiment analysis
#-------------------------------
analyzer = SentimentIntensityAnalyzer()  #vadersentiment object

Data = []
Words = []
Label = []