def main():
    """Search tweets and render a word cloud of the nouns they contain.

    Command line: ``script QUERY SINCE..UNTIL`` where the second argument
    is a date range such as ``2020-01-01..2020-01-31``.
    """
    query = sys.argv[1]
    since, until = sys.argv[2].split("..")

    client = TwitterClient(consumer_key=settings.CONSUMER_KEY,
                           consumer_secret=settings.CONSUMER_SECRET,
                           access_token_key=settings.ACCESS_TOKEN_KEY,
                           access_token_secret=settings.ACCESS_TOKEN_SECRET)
    result = client.search(query, since, until)
    # Flatten the nouns of every tweet; the nested comprehension avoids the
    # quadratic list concatenation of sum(list_of_lists, []).
    words = [
        w for r in result
        for w in nagisa.extract(r.text, extract_postags=["名詞"]).words
    ]
    # Drop bare numbers and occurrences of the query term itself.
    lower_query = query.lower()
    without_num = [
        w for w in words if not w.isdigit() and w.lower() != lower_query
    ]

    cloud = WordCloud(background_color="white",
                      contour_width=5,
                      contour_color="royalblue").generate(
                          " ".join(without_num))

    plt.figure()
    plt.imshow(cloud, interpolation="bilinear")
    plt.axis("off")
    plt.show()
def search():
    """Flask view: sentiment breakdown for tweets matching query param ``q``.

    Fetches up to 200 tweets, computes positive/negative/neutral percentages
    and renders ``index.html`` with up to three sample tweets per polarity.
    Fixes two crashes in the original: ZeroDivisionError when no tweets are
    returned, and ValueError from random.sample when fewer than 3 samples
    of a polarity exist.
    """
    api = TwitterClient()
    q = request.args.get('q')

    # calling function to get tweets
    tweets = api.get_tweets(query=q, count=200)
    total = len(tweets)

    ptweets = [tweet for tweet in tweets if tweet['sentiment'] == 'positive']
    ntweets = [tweet for tweet in tweets if tweet['sentiment'] == 'negative']

    if total:
        positive = round(100 * len(ptweets) / total, 4)
        negative = round(100 * len(ntweets) / total, 4)
        neutral = round(100 * (total - len(ntweets) - len(ptweets)) / total, 4)
    else:
        # No tweets found: report zeros instead of dividing by zero.
        positive = negative = neutral = 0.0

    return render_template('index.html',
                           has_results=True,
                           positive=positive,
                           negative=negative,
                           neutral=neutral,
                           q=q,
                           ptweets=random.sample(ptweets, min(3, len(ptweets))),
                           ntweets=random.sample(ntweets, min(3, len(ntweets))))
# --- Esempio n. 3 ---
def sentimental():
    """Bottle route: JSON sentiment percentages for tweets under a hashtag.

    Reads ``hashtag`` from the query string, analyses up to 10 tweets, and
    returns the parsed count plus positive/negative/neutral percentages.
    Guards against ZeroDivisionError when the search returns no tweets.
    """
    hashtag = request.query['hashtag']
    api = TwitterClient()

    # calling function to get tweets
    tweets = api.get_tweets(query='#' + hashtag, count=10)

    # picking positive and negative tweets from tweets
    positive_tweets = [
        tweet for tweet in tweets if tweet['sentiment'] == 'positive'
    ]
    negative_tweets = [
        tweet for tweet in tweets if tweet['sentiment'] == 'negative'
    ]

    total = len(tweets)
    if total:
        positive = 100 * len(positive_tweets) / total
        negative = 100 * len(negative_tweets) / total
        neutral = 100 * (total - len(negative_tweets) -
                         len(positive_tweets)) / total
    else:
        # No tweets: report zeros instead of raising ZeroDivisionError.
        positive = negative = neutral = 0

    data = {
        "parsed_tweets": total,
        "positive": positive,
        "negative": negative,
        "neutral": neutral,
        "tweets": tweets,
    }
    response.content_type = 'application/json'
    return json.dumps(data)
# --- Esempio n. 4 ---
    def __init__(self):
        """Announce startup and register the controller's dependencies."""

        # Announcing
        self.log.header('main:title')

        # Registering Dependencies
        self.twitter = TwitterClient()
        self.saijiki = Saijiki()
 def __init__(self):
     """Set up the Twitter/Instagram clients and empty entity caches."""
     self.TC = TwitterClient()
     self.IC = InstagramClient()
     self.t_entities = None  # cached Twitter entities
     self.i_entities = None  # cached Instagram entities
     # Keywords that mark a Wikipedia summary as describing a musician.
     self.music = [
         "singer", "songwriter", "recording artist", "musician", "vocalist",
         "band", "player"
     ]
# --- Esempio n. 6 ---
def main():
    """Demo: resolve a user's Twitter/Instagram handles and print musicians
    mentioned in their posts (Python 2 -- note the print statements)."""
    erc = EntityRecongnitionClient()
    user_name = 'Lena Blietz'
    tc = TwitterClient()
    ic = InstagramClient()
    sn_twitter = tc.search_username(user_name)
    sn_instagram = ic.get_username(user_name)
    # print "-----------Instagram Entities --------------"
    # print erc.get_entities_from_instagram(sn_instagram)
    # print "-----------Twitter Entities --------------"
    # print erc.get_entities_from_tweets(sn_twitter,50)
    print "-----------Musicians --------------"
    print erc.get_musicians(sn_twitter, sn_instagram)
# --- Esempio n. 7 ---
def recursive_friendship_crawl(user_id,
                               screen_name,
                               iterations_left,
                               max_friends_per_node=100):
    """Depth-limited crawl of the friendship graph starting at *user_id*.

    Each visited user's friend list is pickled to ``users/<id>.pickle``.
    Shares module globals ``twitter_client`` (lazily created client) and
    ``user_ids_expanded`` (list of visited ids) across recursive calls.

    NOTE(review): the root user is never added to ``user_ids_expanded``,
    so a cycle back to the root would expand it again -- confirm intended.
    """

    global twitter_client, user_ids_expanded

    # Lazily create the shared state on the first call.
    if twitter_client is None:
        twitter_client = TwitterClient()
    if user_ids_expanded is None:
        user_ids_expanded = []

    # get a list of friends for this user
    friend_objs = twitter_client.get_friend_list(
        user=user_id, num_friends=max_friends_per_node)
    # turn the list of user objects into a list of minimal information (id and screen name)
    friend_list = {
        friend.id_str: {
            'id': friend.id_str,
            'screen_name': friend.screen_name
        }
        for friend in friend_objs
    }
    # create a dictionary to store this user's list of friends
    user_dict = {
        'id': user_id,
        'screen_name': screen_name,
        'friends': friend_list
    }
    # store the friends list in a .pickle file
    with open(os.path.join('users', '{}.pickle'.format(user_id)), 'wb') as f:
        pickle.dump(user_dict, f)
    # iterate friends
    for friend_obj in friend_objs:
        # skip friends that have already been expanded
        if friend_obj.id_str not in user_ids_expanded:
            # mark this friend as expanded so that we don't try to expand it again
            user_ids_expanded.append(friend_obj.id_str)
            # while we haven't gone to the full recursive depth
            if iterations_left > 1:
                # get this friends list of friends
                recursive_friendship_crawl(user_id=friend_obj.id_str,
                                           screen_name=friend_obj.screen_name,
                                           iterations_left=iterations_left - 1)
# --- Esempio n. 8 ---
    def get(self):
        """Handle GET: fetch tweets for query param ``q`` and return
        enriched rows (counts, user info, sentiment, fake score) as JSON."""
        twitter_client = TwitterClient()
        query = self.get_argument("q")
        print "Fetching tweets"
        data = twitter_client.getTweets(query)

        print "Processing for sentiment"
        tweets = data['statuses']
        idList = []  # tweet ids already emitted, for de-duplication
        response = []
        for tweet in tweets:
            # Appending to ``tweets`` while iterating is deliberate: the
            # embedded retweeted_status is processed as a row of its own.
            if 'retweeted_status' in tweet.keys() and type(
                    tweet['retweeted_status']) == type({}):
                tweets.append(tweet['retweeted_status'])
            if tweet['id'] in idList:
                continue
            idList.append(tweet['id'])
            # Flatten only the fields the client needs; .get() guards
            # optional keys in the user object.
            row = {
                'created_at': tweet['created_at'],
                'count': {
                    'favorite': tweet['favorite_count'],
                    'retweet': tweet['retweet_count'],
                    'views': tweet['user'].get('followers_count', 0)
                },
                'text': tweet['text'],
                'hashtags': [x['text'] for x in tweet['entities']['hashtags']],
                'user': {
                    'description': tweet['user'].get('description', None),
                    'favorite': tweet['user'].get('favorites_count', 0),
                    'followers': tweet['user'].get('followers_count', 0),
                    'location': tweet['user'].get('location', None),
                    'name': tweet['user'].get('name', None),
                    'image': tweet['user'].get('profile_image_url', None),
                    'tweets': tweet['user'].get('statuses_count', None),
                    'verified': tweet['user'].get('verified', None)
                },
                'sentiment': sentiment_classifier.getSentiment(tweet['text']),
                'fake': fakeAnalyser(tweet['text'])
            }
            response.append(row)
        #response = json.loads(open("result.json", "r").read())

        return self.write_json({'success': True, 'data': response})
# --- Esempio n. 9 ---
    def get(self):
        """Handle GET: fetch tweets for query param ``q`` and return
        enriched rows (counts, user info, sentiment, fake score) as JSON."""
        twitter_client = TwitterClient()
        query = self.get_argument("q")
        print "Fetching tweets"
        data = twitter_client.getTweets(query)

        print "Processing for sentiment"
        tweets = data['statuses']
        idList = []  # tweet ids already emitted, for de-duplication
        response = []
        for tweet in tweets:
            # Appending to ``tweets`` while iterating is deliberate: the
            # embedded retweeted_status is processed as a row of its own.
            if 'retweeted_status' in tweet.keys() and type(tweet['retweeted_status']) == type({}):
                tweets.append(tweet['retweeted_status'])
            if tweet['id'] in idList:
                continue
            idList.append(tweet['id'])
            # Flatten only the fields the client needs; .get() guards
            # optional keys in the user object.
            row = { 'created_at': tweet['created_at'],
                    'count': { 'favorite': tweet['favorite_count'],
                               'retweet': tweet['retweet_count'],
                               'views': tweet['user'].get('followers_count', 0)
                    },
                    'text': tweet['text'],
                    'hashtags': [x['text'] for x in tweet['entities']['hashtags']],
                    'user': { 'description': tweet['user'].get('description', None),
                              'favorite': tweet['user'].get('favorites_count', 0),
                              'followers': tweet['user'].get('followers_count', 0),
                              'location': tweet['user'].get('location', None),
                              'name': tweet['user'].get('name', None),
                              'image': tweet['user'].get('profile_image_url', None),
                              'tweets': tweet['user'].get('statuses_count', None),
                              'verified': tweet['user'].get('verified', None)
                    },
                    'sentiment': sentiment_classifier.getSentiment(tweet['text']),
                    'fake': fakeAnalyser(tweet['text'])
            }
            response.append(row)
        #response = json.loads(open("result.json", "r").read())

        return self.write_json({'success': True, 'data': response})
# --- Esempio n. 10 ---
def main(args):
    """Worker entry point: wire a Twitter client to Slack and run the bot.

    *args* is the raw argument vector handed to the parser; credentials
    are read from environment variables.
    """
    log_banner_start()
    start_time = datetime.datetime.now()

    # Parse command-line options.
    namespace = create_parser(args).parse_args()
    logger.info('Command-line arguments recieved and parsed')

    # Map the numeric log-level option onto the stdlib logging levels.
    levels = [
        logging.DEBUG, logging.INFO, logging.WARNING, logging.ERROR,
        logging.CRITICAL
    ]
    logger.setLevel(levels[int(namespace.log_lvl)])
    if args:
        logger.info('Custom log-level set in accordance with '
                    'command-line argument provided')

    # Install signal handlers before any long-running work starts.
    config_signal_handlers()

    # The context manager closes the Twitter connection on exit.
    with TwitterClient(
            consumer_key=os.environ['CONSUMER_KEY'],
            consumer_secret=os.environ['CONSUMER_SECRET'],
            access_token=os.environ['ACCESS_TOKEN'],
            access_token_secret=os.environ['ACCESS_TOKEN_SECRET']) as twitter:
        logger.info('Twitter client connected')

        # Bring up Slack and hand it the Twitter client.
        slack = SlackClient(oauth_token=os.environ['SLACK_TOKEN'])
        logger.info('Slack client connected')

        slack.register_twitter_client(twitter)
        logger.info('Slack and Twitter clients connected to each other')

        slack.run()

        log_banner_stop(start_time)
  print "[INFO] Authorize this application: {}".format(flow.start())
  authCode = raw_input("Enter auth code here: ").strip()

  # finish the authorization and grab the Dropbox client
  (accessToken, userID) = flow.finish(authCode)
  client = DropboxClient(accessToken)
  print "[SUCCESS] dropbox account linked"

# initialize the camera and grab a reference to the raw camera capture
camera = PiCamera()
camera.resolution = tuple(conf["resolution"])
camera.framerate = conf["fps"]
rawCapture = PiRGBArray(camera, size=tuple(conf["resolution"]))

# Twitter notifications are optional; sentTweet tracks whether one was sent.
if conf["use_twitter"]:
  twitterClient = TwitterClient(conf['twitter'])
  sentTweet = False

# allow the camera to warmup, then initialize the average frame, last
# uploaded timestamp, and frame motion counter
print "[INFO] warming up..."
time.sleep(conf["camera_warmup_time"])
avg = None  # running average background frame, filled on first capture
lastUploaded = datetime.datetime.now()
motionCounter = 0

# capture frames from the camera
for f in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
  # grab the raw NumPy array representing the image and initialize
  # the timestamp and occupied/unoccupied text
  frame = f.array
# --- Esempio n. 12 ---
def stream_tweet(hashtag):
    """Wrap the 20 most recent posts for *hashtag* in a statement response."""
    client = TwitterClient(db['access_token_key'], db["access_token_secret"])
    tweets = client.get_tweets(hashtag, num_of_posts=20)
    return statement(tweets)
# --- Esempio n. 13 ---
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 12 18:21:53 2018

@author: Ruslan
"""

from helpers import Helpers
from twitter_client import TwitterClient
import config

if __name__ == '__main__':

    twitter_client = TwitterClient(config.ID_USER)
    helpers = Helpers()

    # Collect and de-duplicate the names of users who retweeted us.
    retweeters = helpers.remove_duplicate(
        helpers.get_names_from_retweet(twitter_client, config.TWEET_AMOUNT))
    helpers.save("users_retweet", retweeters)

    # Collect and de-duplicate the ids of users who liked our tweets.
    likers = helpers.remove_duplicate(
        helpers.get_names_from_likes(twitter_client, config.ID_USER,
                                     config.TWEET_AMOUNT))
    helpers.save("users_likes", likers)
	def __init__(self):
		"""Set up the Twitter/Instagram clients and empty entity caches."""
		self.TC = TwitterClient()
		self.IC = InstagramClient()
		self.t_entities = None  # cached Twitter entities
		self.i_entities = None  # cached Instagram entities
		# Keywords that mark a Wikipedia summary as describing a musician.
		self.music = ["singer", "songwriter", "recording artist", "musician", "vocalist", "band", "player"]
# --- Esempio n. 15 ---
import os

# 3rd Party Modules
from flask import Flask, render_template, redirect, request, url_for, session
from flask_dance.contrib.twitter import make_twitter_blueprint, twitter
from pymongo import MongoClient

# Local Modules
from string_analyzer import StringAnalyser
from twitter_client import TwitterClient
from image_analysis import ImageAnalyser
from matcher import Matcher, LOCAL_DOG_IMAGES

# Global Variables: one shared analyzer/client instance per process.
string_analyzer = StringAnalyser()
twitter_client = TwitterClient()
image_analyzer = ImageAnalyser()
matcher = Matcher()
# Create our flask object
app = Flask(__name__, static_url_path="", static_folder="templates/static")
# NOTE(review): hard-coded session secret -- move to an environment variable.
app.secret_key = "supersekrit"
# Twitter OAuth via flask-dance; credentials come from the environment.
blueprint = make_twitter_blueprint(
    api_key=os.environ['CONSUMER_KEY'],
    api_secret=os.environ['CONSUMER_SECRET'],
)
app.register_blueprint(blueprint, url_prefix="/login")
# MongoDB (mlab) connection parameters.
DB_NAME = 'woof-are-you'
DB_HOST = 'ds063160.mlab.com'
DB_PORT = 63160
DB_USER = os.environ['MONGO_USER']
# --- Esempio n. 16 ---
 def setup_class(cls):
     """Create one TwitterClient shared by every test in this class."""
     cls.client = TwitterClient()
# --- Esempio n. 17 ---
def detailed_request(hashtag, type, type_count, count_per_type):
    """Per-interval sentiment report for *hashtag*, persisted to the DB.

    ``type`` selects the interval: ``'day'`` walks back ``type_count`` days
    using the Twitter API; any other value walks back ``type_count`` months
    scraping with GetOldTweets (``got``). ``count_per_type`` caps the tweets
    fetched per interval. Returns a JSON payload with per-interval labels,
    counts and positive/negative percentage lists.

    NOTE(review): the parameter name ``type`` shadows the builtin; kept
    because callers may pass it by keyword.
    """
    hashtag = '#' + hashtag

    # A pre-existing row for this hashtag means update instead of insert.
    db_result = SearchResult.query.get(hashtag)
    update = db_result != None

    type_count = int(type_count)
    count_per_type = int(count_per_type)
    api = TwitterClient()

    # Running totals across all intervals plus per-interval result lists.
    tcountp = 0
    tcountn = 0
    ttcount = 0
    label = []
    count = []
    poslist = []
    neglist = []
    postweet = []
    negtweet = []
    ntext = ''
    ptext = ''

    response_object = SearchResult(hashtag=hashtag, datetime=datetime.now())

    if type == 'day':
        x = datetime.today()
        for key in range(type_count):
            # One-day window [sdate, edate), walking backwards from today.
            edate = x - timedelta(days=key)
            sdate = x - timedelta(days=key + 1)
            tweets = api.tweet_analysis(query=hashtag,
                                        type='detailed',
                                        until=edate.strftime('%Y-%m-%d'),
                                        since=sdate.strftime('%Y-%m-%d'),
                                        count=count_per_type)
            positive = 0
            negative = 0
            pz = 0
            if len(tweets) > 0:
                ptweets = []
                for tweet in tweets:
                    if tweet['sentiment'] == 1:
                        ptweets.append(tweet)
                        ptext = ptext + " " + tweet['text']
                ntweets = []
                for tweet in tweets:
                    if tweet['sentiment'] == -1:
                        ntweets.append(tweet)
                        ntext = ntext + " " + tweet['text']
                px = len(ptweets)
                py = len(tweets)
                positive = 100 * px / py
                ttcount += py
                tcountp += px
                pz = len(ntweets)
                negative = 100 * pz / py
            # Axis label "DD/Mon" for this day.
            label.append(
                str(edate.strftime('%d')) + "/" +
                str(monthret(int(edate.strftime('%m')))))
            count.append(len(tweets))
            poslist.append(positive)
            neglist.append(negative)
            tcountn += pz
            if key == 0:
                # Sample up to two positive/negative tweets from the most
                # recent day; IndexError is expected when fewer exist.
                # NOTE(review): ptweets/ntweets are unbound here when the
                # day had no tweets -- the bare excepts also hide that.
                try:
                    postweet.append(ptweets[len(ptweets) - 1]['status'])
                except:
                    pass
                try:
                    negtweet.append(ntweets[len(ntweets) - 1]['status'])
                except:
                    pass
                try:
                    postweet.append(ptweets[len(ptweets) - 2]['status'])
                except:
                    pass
                try:
                    negtweet.append(ntweets[len(ntweets) - 2]['status'])
                except:
                    pass
    else:
        # Month mode: scrape historical tweets with GetOldTweets, walking
        # back ``type_count`` months from the current month.
        x = datetime.now().month
        y1 = datetime.now().year
        for key in range(type_count):
            month1 = x - key
            year1 = y1
            if month1 <= 0:
                year1 = y1 - 1
                month1 += 12
            dates1 = str(year1) + '-' + str(month1) + '-'
            # Days 01-28 keep the range valid for every month of the year.
            tweetCriteria = got.manager.TweetCriteria().setQuerySearch(hashtag) \
                .setSince(dates1 + "01") \
                .setUntil(dates1 + "28") \
                .setMaxTweets(count_per_type)
            try:
                tweetgot = got.manager.TweetManager.getTweets(tweetCriteria)
                tweets = []
                for tweet in tweetgot:

                    parsed_tweet = {}
                    parsed_tweet[
                        'status'] = f'https://twitter.com/{tweet.username}/status/{tweet.id}'
                    y = clean_tweet(tweet.text)
                    parsed_tweet['text'] = y
                    parsed_tweet['sentiment'] = sentiment_analyzer_scores(y)
                    # De-duplicate retweeted tweets; keep originals as-is.
                    if tweet.retweets > 0:
                        if parsed_tweet not in tweets:
                            tweets.append(parsed_tweet)
                    else:
                        tweets.append(parsed_tweet)
                positive = 0
                negative = 0
                if len(tweets) > 0:
                    ptweets = []
                    for tweet in tweets:
                        if tweet['sentiment'] == 1:
                            ptweets.append(tweet)
                            ptext = ptext + " " + tweet['text']
                    ntweets = []
                    for tweet in tweets:
                        if tweet['sentiment'] == -1:
                            ntweets.append(tweet)
                            ntext = ntext + " " + tweet['text']
                    positive = 100 * len(ptweets) / len(tweets)
                    negative = 100 * len(ntweets) / len(tweets)
                label.append(str(monthret(month1)) + "/" + str(year1))
                count.append(len(tweets))
                poslist.append(positive)
                neglist.append(negative)
                ttcount += len(tweets)
                # NOTE(review): ptweets/ntweets are unbound when this month
                # had no tweets; the enclosing bare except then silently
                # skips the rest of the month's bookkeeping.
                tcountp += len(ptweets)
                tcountn += len(ntweets)
                if key == 0:
                    try:
                        postweet.append(ptweets[len(ptweets) - 1]['status'])
                    except:
                        pass
                    try:
                        negtweet.append(ntweets[len(ntweets) - 1]['status'])
                    except:
                        pass
                    try:
                        postweet.append(ptweets[len(ptweets) - 2]['status'])
                    except:
                        pass
                    try:
                        negtweet.append(ntweets[len(ntweets) - 2]['status'])
                    except:
                        pass
            except:
                pass

    # NOTE(review): ``tweets`` is unbound when type_count == 0, and
    # ttcount can be 0 (ZeroDivisionError below) when no interval
    # produced tweets -- confirm callers guarantee non-empty results.
    response_object.tweet_count = len(tweets)
    response_object.positive = 100 * tcountp / ttcount
    response_object.negative = 100 * tcountn / ttcount
    response_object.positive_wcloud = word_cloud(ptext)
    response_object.negative_wcloud = word_cloud(ntext)

    # Up to two sample tweets of each polarity for the stored row.
    try:
        response_object.pos_tweet1 = postweet[0]
    except:
        pass
    try:
        response_object.neg_tweet1 = negtweet[0]
    except:
        pass
    try:
        response_object.pos_tweet2 = postweet[1]
    except:
        pass
    try:
        response_object.neg_tweet2 = negtweet[1]
    except:
        pass

    if update == True:
        # Refresh the existing row in place.
        db_result.datetime = datetime.now()
        db_result.tweet_count = response_object.tweet_count
        db_result.positive = response_object.positive
        db_result.negative = response_object.negative
        db_result.positive_wcloud = response_object.positive_wcloud
        db_result.negative_wcloud = response_object.negative_wcloud
        db_result.pos_tweet1 = response_object.pos_tweet1 or None
        db_result.pos_tweet2 = response_object.pos_tweet2 or None
        db_result.neg_tweet1 = response_object.neg_tweet1 or None
        db_result.neg_tweet2 = response_object.neg_tweet2 or None
        db.session.commit()
    else:
        db.session.add(response_object)
        db.session.commit()

    returndata = {
        "hashtag": hashtag,
        "positive": response_object.positive,
        "negative": response_object.negative,
        "tweetcount": ttcount,
        "datetime": response_object.datetime,
        "label": label,
        "count": count,
        "positive_list": poslist,
        "negative_list": neglist,
        "positive_tweet": postweet,
        "negative_tweet": negtweet,
        "ptweet": tcountp,
        "ntweet": tcountn,
        "positive_wcloud": response_object.positive_wcloud,
        "negative_wcloud": response_object.negative_wcloud
    }
    return jsonify(returndata)
# --- Esempio n. 18 ---
# Path of the TOML configuration loaded by this worker.
config_path = './_config/development.toml'
fee = 0.01  # transaction fee rate (手数料)


def get_receiver_user_id_str(tweet, receiver_screen_name):
    """Return the id of the first mentioned user matching *receiver_screen_name*.

    Raises Exception when no mention in the tweet matches.
    """
    match = next((mention for mention in tweet["entities"]["user_mentions"]
                  if mention["screen_name"] == receiver_screen_name), None)
    if match is None:
        raise Exception("not match receiver_user")
    return match["id"]


if __name__ == '__main__':
    config = toml.load(open(config_path))

    t_client = TwitterClient(config["twitter"])
    w_client = WalletClient(config["wallet"])
    d_client = DBClient(config["database"])

    print("Worker Run")

    timeline = t_client.stream_bot_timeline()
    for line in timeline.iter_lines():
        try:
            tweet = json.loads(line.decode("utf-8"))

            tweet_id_str = tweet["id_str"]  # リプライ時に利用する
            sender_user_id_str = tweet["user"]["id_str"]
            sender_user_screen_name = tweet["user"]["screen_name"]
            tweet_dict = tweet["text"].split(" ")
# --- Esempio n. 19 ---
def read_post(user_id):
    """Wrap the user's 20 most recent timeline posts in a statement response."""
    client = TwitterClient(db['access_token_key'], db["access_token_secret"])
    posts = client.get_timeline(user_id, num_of_posts=20)
    return statement(posts)
# --- Esempio n. 20 ---
from flask import Flask, request, render_template, jsonify
from twitter_client import TwitterClient

app = Flask(__name__)
# One shared Twitter client reused by every request handler.
api = TwitterClient()


@app.route('/')
def index():
    """Render the landing page."""
    return render_template('index.html')


@app.route('/tweets')
def tweets():
    """Return tweets matching the ``query`` request parameter as JSON."""
    search_term = request.args.get('query')
    results = api.get_tweets(search_term)
    payload = {'data': results, 'count': len(results)}
    return jsonify(payload)


# NOTE(review): debug=True enables the Werkzeug debugger -- disable in production.
port = 5001
app.run(host="0.0.0.0", port=port, debug=True)
# --- Esempio n. 21 ---
        if len(word) > 3:
            claim_tokens.append(word)
    claim_tokens.sort()
    print(claim_tokens)
    " ".join(claim_tokens)
    needle = nlp(" ".join(claim_tokens))
    for item in inmem_db.get_items():
        print(item.claim_tokens)
        hay = nlp(" ".join(item.claim_tokens))
        if needle.similarity(hay) > 0.90:
            return '{"result": "' + item.claim_result + '" "source":"' + item.source_url + '"}'
    return '{"result": "Unknown" "source":"www.google.com"}'


@api.route('/message', methods=['POST'])
def post_message():
    """Validate a JSON-posted message; abort with 400 when the body is not JSON."""
    print(request)
    if not request.json:
        abort(400)
    return validate(request.json["message"]), 201


if __name__ == "__main__":
    # Initialize feed handlers to popuate the DB
    twitter_client = TwitterClient(TWITTER_KEY, TWITTER_SECRET, TOKEN_KEY,
                                   TOKEN_SECRET)
    source1 = FactCheckIndiaTwitterFeedHandler(twitter_client, inmem_db)
    source1.start()

    # Start the web
    api.run()
# --- Esempio n. 22 ---
class Controller(Model):
    """Drives haiku generation: fetches tweets per kigo and feeds a Protocol.

    NOTE(review): ``haikus`` is a mutable class attribute, so it is shared
    by every Controller instance -- confirm that is intended.
    """

    # Properties
    twitter = None   # TwitterClient, set in __init__
    protocol = None  # Protocol, created per generation run
    haikus = []      # accumulated finished haikus (class-level, shared)

    # Constructor
    #------------
    def __init__(self):

        # Announcing
        self.log.header('main:title')

        # Registering Dependencies
        self.twitter = TwitterClient()
        self.saijiki = Saijiki()

    # Methods
    #------------
    def generateHaiku(self, kigo):
        """Generate one haiku for *kigo*, retrying until the protocol has
        gathered enough material."""

        # Passing kigo and tweets to the protocol
        while self.protocol.procede(self.twitter.findTweets(kigo),
                                    kigo) is False:
            self.log.write('controller:not_enough')

        # Haiku is complete
        self.haikus.append(self.protocol.haiku)
        print ''
        print self.protocol.haiku
        print ''

    def generateMultipleHaikus(self, kigo=None, number=1):
        """Generate *number* haikus for *kigo* (random kigo when omitted)."""

        # Checking kigo
        if kigo is None:
            self.log.write('controller:kigo_not_given')
            kigo = self.saijiki.getRandomKigo()

        # Initiating protocol
        self.protocol = Protocol()

        # Looping
        self.log.write('controller:number', variables={'nb': number})
        for i in range(number):
            self.generateHaiku(kigo)

        self.log.write('main:end')

    def generateSaijikiHaikus(self, number=1):
        """Generate *number* haikus for every kigo in the saijiki."""

        # Initializing saijiki
        self.log.write('controller:saijiki')

        # Initiating protocol
        self.protocol = Protocol()

        # Looping
        for kigo in self.saijiki.kigo_list:
            for i in range(number):
                self.generateHaiku(kigo)

        self.log.write('main:end')
# --- Esempio n. 23 ---
import os
from persistence import Persistence
from twitter_client import TwitterClient
from textblob import TextBlob

from tweet_analyzer import TweetAnalyzer

if __name__ == "__main__":
    #Set Pandas Options to see all Coloumns of a data frame
    pd.set_option('display.max_rows', 500)
    pd.set_option('display.max_columns', 500)
    pd.set_option('display.width', 1000)

    tweet_analyzer = TweetAnalyzer()

    twitter_client = TwitterClient()
    api = twitter_client.get_twitter_client_api()

    persistence = Persistence()

    tweets = api.user_timeline(screen_name="abockelm", count=10)
    df = tweet_analyzer.tweets_to_dataframe(tweets)
    df['sentiment'] = np.array(
        [tweet_analyzer.analyze_sentiment(tweet) for tweet in df['tweets']])

    persistence.save_tweets(df)

    #print(df.head())

    #Timeow()
# --- Esempio n. 24 ---
def simple_request(hashtag, count=100):
    """Run (or reuse) a cached sentiment analysis for *hashtag*.

    A stored result younger than one minute is served straight from the
    database; otherwise up to *count* tweets are fetched, re-analysed and
    the stored row is updated (or created). Returns the result serialized
    via ``to_json``.

    Fixes vs. the original: ZeroDivisionError when the search returns no
    tweets, and bare excepts replaced with explicit length guards for the
    sample-tweet selection.
    """
    hashtag = '#' + hashtag
    count = int(count)

    # Serve the cached row if it is less than a minute old.
    db_result = SearchResult.query.get(hashtag)
    update = False
    if db_result is not None:
        age_minutes = (datetime.now() - db_result.datetime).total_seconds() // 60
        if age_minutes < 1:
            return to_json(SearchResult, db_result)
        update = True

    api = TwitterClient()
    tweets = api.tweet_analysis(query=hashtag, count=count)

    # Split by sentiment and build the word-cloud source texts.
    ptweets = [t for t in tweets if t['sentiment'] == 1]
    ntweets = [t for t in tweets if t['sentiment'] == -1]
    ptext = ''.join(' ' + t['text'] for t in ptweets)
    ntext = ''.join(' ' + t['text'] for t in ntweets)

    response_object = SearchResult(hashtag=hashtag, datetime=datetime.now())

    total = len(tweets)
    response_object.tweet_count = total
    # Guard against ZeroDivisionError when the search returned nothing.
    response_object.positive = 100 * len(ptweets) / total if total else 0
    response_object.negative = 100 * len(ntweets) / total if total else 0
    response_object.positive_wcloud = word_cloud(ptext)
    response_object.negative_wcloud = word_cloud(ntext)

    # Up to two sample tweets of each polarity, most recent first.
    if ptweets:
        response_object.pos_tweet1 = ptweets[-1]['status']
    if len(ptweets) > 1:
        response_object.pos_tweet2 = ptweets[-2]['status']
    if ntweets:
        response_object.neg_tweet1 = ntweets[-1]['status']
    if len(ntweets) > 1:
        response_object.neg_tweet2 = ntweets[-2]['status']

    if update:
        # Refresh the stale row in place.
        db_result.datetime = datetime.now()
        db_result.tweet_count = response_object.tweet_count
        db_result.positive = response_object.positive
        db_result.negative = response_object.negative
        db_result.positive_wcloud = response_object.positive_wcloud
        db_result.negative_wcloud = response_object.negative_wcloud
        db_result.pos_tweet1 = response_object.pos_tweet1 or None
        db_result.pos_tweet2 = response_object.pos_tweet2 or None
        db_result.neg_tweet1 = response_object.neg_tweet1 or None
        db_result.neg_tweet2 = response_object.neg_tweet2 or None
        db.session.commit()
    else:
        db.session.add(response_object)
        db.session.commit()

    return to_json(SearchResult, response_object)
# --- Esempio n. 25 ---
#print(analyzer_load1.get_dataframe())

#tweets = twitter_client.get_user_timeline_tweets(start_date = "2019-05-01", end_date = "2019-05-26", retweets = False, max_id = analyzer_load1.get_dataframe().id.iloc[-1])

#analyzer_load2 = TweetAnalyzer(tweets)
#print(analyzer_load2.get_dataframe())

#Parties = ["afd"]
#Parties = ["die_Gruenen","afd","diepartei"]
#Parties = ["cducsubt", "cdu","csu"]
#Parties = ["spdde"]
Parties = ["die_Gruenen"]
#Parties = ["spdde", "fdp","die_Gruenen","afd","dieLinke","fwlandtag","diepartei","cdu","csu"]

#Parties = ["spdde"]
twitter_client = TwitterClient(CONSUMER_KEY, CONSUMER_SECRET, ACCESS_TOKEN,
                               ACCESS_TOKEN_SECRET, Parties)

import datetime

EuropawahlDate = datetime.date(2019, 5, 27)
ThreeWeeksBeforeDate = EuropawahlDate - datetime.timedelta(weeks=3)
today = datetime.date.today()

tweets = twitter_client.get_user_timeline_tweets(
    start_date=str(ThreeWeeksBeforeDate), end_date=str(EuropawahlDate))

analyzer = TweetAnalyzer(tweets)

analyzer.tweet_filter_retweets(min_number_of_retweets=25)
#print(analyzer.bagofwords(on="hashtags",extended_view=False))
analyzer.plot_bar(type="hashtags",
# --- Esempio n. 26 ---
from datetime import datetime, timedelta
from os import environ
import rabbitmq


from database import Db
from log import get_logger
from twitter_client import TwitterClient

if __name__ == '__main__':
	get_logger('worker').info('creating twitter client')

	# Twitter credentials and batch size come from the environment;
	# unset variables default to empty strings.
	twitter_client = TwitterClient(
		consumer_key=environ.get('TWITTER_CONSUMER_KEY', ''),
		consumer_secret=environ.get('TWITTER_CONSUMER_SECRET', ''),
		access_token_key=environ.get('TWITTER_ACCESS_TOKEN_KEY', ''),
		access_token_secret=environ.get('TWITTER_ACCESS_TOKEN_SECRET', ''),
		count_per_search=100
	)

	# MongoDB connection parameters, all overridable via the environment.
	db = Db(
		environ.get('MONGODB_DATABASE', 'tweet_classify'),
		environ.get('MONGODB_TERMS_COLLECTION', 'terms'),
		environ.get('MONGODB_TWEETS_COLLECTION', 'tweets'),
		environ.get('MONGODB_HOST', 'localhost'),
		int(environ.get('MONGODB_PORT', '27017'))
	)

	get_logger('worker').info('fetching required terms')

	# Terms touched within the last FETCHER_CRON_INTERVAL minutes (default 10).
	total, terms = db.terms(since=datetime.now() - timedelta(minutes=int(environ.get('FETCHER_CRON_INTERVAL', 10))))
class EntityRecongnitionClient:
    """Find named entities (and musicians among them) in a user's social posts.

    Pulls text from Twitter and Instagram, runs entity extraction over it, and
    cross-checks PERSON entities against Wikipedia summaries.

    NOTE(review): the misspelled class name is preserved intentionally so
    existing callers keep working.
    """

    def __init__(self):
        self.TC = TwitterClient()
        self.IC = InstagramClient()
        # Memoized entity lists, populated lazily on first fetch.
        self.t_entities = None
        self.i_entities = None
        # Keywords whose presence in a Wikipedia summary marks a musician.
        self.music = ["singer", "songwriter", "recording artist", "musician", "vocalist", "band", "player"]

    def get_entities_from_tweets(self, screen_name, count):
        """Return entities with score >= 0.5 from the user's tweets (cached)."""
        if self.t_entities:
            return self.t_entities
        tweets = self.TC.search_tweets_for_user(screen_name, count)
        entities = []
        for tweet in tweets:
            for entry in get_entities(tweet):
                if entry["score"] >= 0.5:
                    entities.append(entry)
        self.t_entities = entities
        return self.t_entities

    def get_entities_from_instagram(self, screen_name):
        """Return entities with score >= 0.5 from Instagram captions (cached)."""
        if self.i_entities:
            return self.i_entities
        media = self.IC.get_user_media(screen_name)
        captions = [photo["caption"]["text"] for photo in media["items"]]
        entities = []
        for caption in captions:
            for entry in get_entities(caption):
                if entry["score"] >= 0.5:
                    entities.append(entry)
        self.i_entities = entities
        return self.i_entities

    def get_people(self, sn_twitter, sn_instagram):
        """Return full names (Instagram first, then Twitter) of PERSON entities.

        Only names containing a space are kept, as a cheap full-name filter.
        """
        people = []
        for ent in self.get_entities_from_instagram(sn_instagram):
            if ent["type"] == 'PERSON' and " " in ent["name"]:
                people.append(ent["name"])
        for ent in self.get_entities_from_tweets(sn_twitter, 100):
            if ent["type"] == 'PERSON' and " " in ent["name"]:
                people.append(ent["name"])
        return people

    def search_wikipedia(self, query):
        """Return a one-sentence summary of the best Wikipedia match, or None."""
        page = wikipedia.search(query)
        if page:
            return wikipedia.summary(page[0], sentences=1)

    def get_musicians(self, sn_twitter, sn_instagram):
        """Return the subset of people whose Wikipedia summary mentions music."""
        people = self.get_people(sn_twitter, sn_instagram)
        musicians = []
        for person in people:
            desc = self.search_wikipedia(person)
            # BUG FIX: search_wikipedia returns None when no page matches;
            # the original `terms in desc` then raised TypeError. Skip those.
            if desc and any(term in desc for term in self.music):
                musicians.append(person)
        return musicians
Esempio n. 28
0
from time import sleep
from datetime import datetime
from utils import log_and_slack
from twitter_client import TwitterClient
from config import COMPARE_FOLLOWERS_SLEEP_INTERVAL, USERS_TO_MONITOR

if __name__ == '__main__':
    # Poll every monitored account forever; sleep interval handled elsewhere.
    while True:
        for user_name in USERS_TO_MONITOR:
            try:
                print(f"[{datetime.now()}] Refreshing data for {user_name}")

                # TODO: handle authorization error
                twitter_client = TwitterClient(user_name)
                # Diff the stored follower set against the current one.
                unfollower_ids, new_follower_ids, followers_count = twitter_client.compare_followers_ids(
                )

                # new unfollowers
                if unfollower_ids:
                    # Resolve numeric IDs to display names for the message.
                    unfollower_names = twitter_client.get_names_from_ids(
                        unfollower_ids)

                    # TODO: send to different channels / show avatar
                    log_and_slack(
                        f"@{user_name} unfollowed by: {unfollower_names}")

                # new followers
                if new_follower_ids:
                    pass  # NOTE(review): new-follower notification not implemented yet

                # no changes
Esempio n. 29
0
def main():
    """Demo driver: look up 'Miley Cyrus' on Twitter and print aggregates.

    BUG FIX: the original used Python 2 `print` statements, a SyntaxError
    under Python 3 (which this file otherwise targets — f-strings appear
    elsewhere). Single-argument `print(...)` calls work on both 2 and 3.
    """
    twttr = TwitterClient()
    user_name = 'Miley Cyrus'
    sn = twttr.search_username(user_name)
    # Warm the client's tweet cache for the resolved screen name.
    twttr.search_tweets_for_user(sn, 50)
    print(twttr.aggregate_photos(sn, 50))
    print(twttr.user_location(sn))
    print(twttr.user_description(sn))
    print(twttr.aggregate_hashtags(sn, 1000))
    print(twttr.aggregate_retweets(sn, 1000))
class EntityRecongnitionClient:
    """Extract named entities from a user's Twitter/Instagram content.

    PERSON entities can be cross-referenced with Wikipedia to identify
    musicians. (Original misspelled class name kept for caller compatibility.)
    """

    def __init__(self):
        self.TC = TwitterClient()
        self.IC = InstagramClient()
        # Lazily populated caches of extracted entities.
        self.t_entities = None
        self.i_entities = None
        # A summary mentioning any of these terms flags the person as a musician.
        self.music = [
            "singer", "songwriter", "recording artist", "musician", "vocalist",
            "band", "player"
        ]

    def get_entities_from_tweets(self, screen_name, count):
        """Return entities scoring >= 0.5 from the user's tweets (memoized)."""
        if self.t_entities:
            return self.t_entities
        tweets = self.TC.search_tweets_for_user(screen_name, count)
        entities = []
        for tweet in tweets:
            for entry in get_entities(tweet):
                if entry["score"] >= 0.5:
                    entities.append(entry)
        self.t_entities = entities
        return self.t_entities

    def get_entities_from_instagram(self, screen_name):
        """Return entities scoring >= 0.5 from Instagram captions (memoized)."""
        if self.i_entities:
            return self.i_entities
        media = self.IC.get_user_media(screen_name)
        captions = [photo["caption"]["text"] for photo in media["items"]]
        entities = []
        for caption in captions:
            for entry in get_entities(caption):
                if entry["score"] >= 0.5:
                    entities.append(entry)
        self.i_entities = entities
        return self.i_entities

    def get_people(self, sn_twitter, sn_instagram):
        """Return PERSON entity names containing a space (full-name heuristic).

        Instagram results come first, then Twitter, matching original order.
        """
        people = []
        for ent in self.get_entities_from_instagram(sn_instagram):
            if ent["type"] == 'PERSON' and " " in ent["name"]:
                people.append(ent["name"])
        for ent in self.get_entities_from_tweets(sn_twitter, 100):
            if ent["type"] == 'PERSON' and " " in ent["name"]:
                people.append(ent["name"])
        return people

    def search_wikipedia(self, query):
        """Return a one-sentence summary of the top Wikipedia hit, or None."""
        page = wikipedia.search(query)
        if page:
            return wikipedia.summary(page[0], sentences=1)

    def get_musicians(self, sn_twitter, sn_instagram):
        """Return people whose Wikipedia summary contains a music keyword."""
        people = self.get_people(sn_twitter, sn_instagram)
        musicians = []
        for person in people:
            desc = self.search_wikipedia(person)
            # BUG FIX: guard against desc being None (no Wikipedia match);
            # `terms in None` previously raised TypeError.
            if desc and any(term in desc for term in self.music):
                musicians.append(person)
        return musicians
Esempio n. 31
0
def main():
    """Demo driver: resolve 'Miley Cyrus' on Twitter and print aggregates.

    BUG FIX: replaced Python 2 `print` statements (SyntaxError on Python 3,
    which this file otherwise uses — see f-strings elsewhere) with
    single-argument `print(...)` calls, valid on both interpreters.
    """
    twttr = TwitterClient()
    user_name = 'Miley Cyrus'
    sn = twttr.search_username(user_name)
    # Pre-fetch recent tweets for the resolved screen name.
    twttr.search_tweets_for_user(sn, 50)
    print(twttr.aggregate_photos(sn, 50))
    print(twttr.user_location(sn))
    print(twttr.user_description(sn))
    print(twttr.aggregate_hashtags(sn, 1000))
    print(twttr.aggregate_retweets(sn, 1000))
Esempio n. 32
0
def post_tweet(tweet):
    """Post *tweet* using credentials stored in ``db`` and wrap the result.

    Returns whatever ``statement(...)`` produces for the posted status.
    """
    client = TwitterClient(db['access_token_key'], db["access_token_secret"])
    return statement(client.post_status(tweet))