def submit_query():
    """Flask view: fetch tweets matching a query and classify their sentiment.

    Reads ``query`` and ``tweet_count`` from the request query string and
    returns a JSON array of ``{"tweet", "sentiment", "confidence"}`` objects,
    where confidence is a percentage rounded to one decimal place.
    """
    print('Received...')
    query = request.args.get('query')
    tweet_count = request.args.get('tweet_count')
    req = "request received by flask!! Request was " + query + ", fetch " + tweet_count + " tweets"
    print(req)
    twitter = Twitter()
    tweets = twitter.fetch_tweets(query, int(tweet_count))
    labels = ['negative', 'positive']
    model = Model()
    results = []
    for tweet in tweets:
        # pred is assumed to be a (1, 2) score array — TODO confirm Model.predict's shape.
        pred = model.predict(tweet)
        # FIX: compute argmax once (was evaluated three times per tweet) and
        # cast numpy scalars to builtin int/float so jsonify can serialize them.
        best = int(np.argmax(pred))
        results.append({
            "tweet": tweet,
            "sentiment": labels[best],
            "confidence": round(float(pred[0][best]) * 100, 1),
        })
    return jsonify(results)
def main():
    """Tweet an 'Error. <timestamp>' message, rate-limited to once per hour.

    Scans the account's own recent timeline for the newest tweet whose text
    (with the 'Error. ' prefix stripped) parses as a timestamp; only posts a
    new error tweet when none is found or the last one is over an hour old.
    """
    client = Twitter()
    timeline = client.get_my_tl(count=10)
    now = datetime.now()
    now_text = str(now).split('.')[0]
    last_posted = None
    for entry in timeline:
        candidate = entry["text"].replace("Error. ", "")
        try:
            last_posted = datetime.strptime(candidate, '%Y-%m-%d %H:%M:%S')
            break  # newest parseable error tweet wins
        except ValueError:
            continue  # not one of ours; keep scanning
    if last_posted is None or last_posted + timedelta(hours=1) < now:
        client.post_tweet("Error. " + now_text)
def main():
    """Look up Twitter profile info for each unique reviewer handle and persist it."""
    twitter_api = Twitter()
    for twitter_handle in ReviewTable.get_unique_reviewers_twitter():
        # FIX: search via the API client — the original called .search() on the
        # handle string itself, leaving twitter_api unused.
        user_info = twitter_api.search(twitter_handle)
        if user_info is None:
            # Handle could not be resolved; skip it.
            continue
        # FIX: insert the fetched record — the original referenced the
        # undefined name `user_info_table`.
        UserInfoTable.insert(user_info)
class TestTwitter(unittest.TestCase):
    """Tests for the Twitter client wrapper."""

    def setUp(self):
        # A fresh client for every test case.
        self.twitter = Twitter()

    def test_get_my_tl(self):
        """get_my_tl(n) returns a list of n tweet dicts."""
        timeline = self.twitter.get_my_tl(2)
        self.assertIsInstance(timeline, list)
        self.assertEqual(len(timeline), 2)
        first = timeline[0]
        self.assertIsInstance(first, dict)
        # Dump the tweet's keys for manual inspection of the payload shape.
        for key in first:
            print(key)
def main():
    """Post the text produced by get_text() and confirm on stdout."""
    client = Twitter()
    client.post_tweet(get_text())
    print('Tweeted')
def setUp(self):
    """Create a fresh Twitter client before each test runs."""
    self.twitter = Twitter()
# Connect to the database named in the earlier-parsed DB config.
# NOTE(review): db_config, db_id, passwd and host are defined above this
# chunk — confirm against the surrounding file.
db_name = db_config['DEFAULT']['DATABASE']
database = Database(db_id, passwd, host, db_name)
# Table handles used by the rest of the script.
keywords = database.get_table('keywords')
experts = database.get_table('experts')
tweets = database.get_table('tweets')

# Load Twitter API credentials from api.ini and build the client.
twitter_config = configparser.ConfigParser()
twitter_config.read('api.ini')
consumer_key = twitter_config['twitter']['api_key']
consumer_secret = twitter_config['twitter']['api_secret_key']
access_token_key = twitter_config['twitter']['access_token']
access_token_secret = twitter_config['twitter']['access_token_secret']
twitter = Twitter(consumer_key, consumer_secret, access_token_key, access_token_secret)

# Configure a shared logger writing to both log.log and the console.
# The handlers-guard prevents duplicate handlers if this module runs twice.
logger = logging.getLogger('logger')
if not logger.handlers:
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter(
        '[ %(levelname)s | %(filename)s: %(lineno)s] %(asctime)s > %(message)s'
    )
    file_handler = logging.FileHandler('log.log')
    file_handler.setFormatter(formatter)
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    logger.addHandler(stream_handler)

preprocessing = Preprocessing()
def poll_pull_requests(api, api_twitter):
    """Poll ready pull requests, tally votes, then merge, reject, or report each one.

    Also maintains the persistent per-user vote totals in server/voters.json and
    the derived "meritocracy" (top voters plus top contributors) in
    server/meritocracy.json. May re-exec the process after a merge.
    """
    __log.info("looking for PRs")

    # get voting window
    base_voting_window = gh.voting.get_initial_voting_window()

    # get all ready prs (disregarding of the voting window)
    prs = gh.prs.get_ready_prs(api, settings.URN, 0)

    # This sets up a voting record, with each user having a count of votes
    # that they have cast.
    try:
        # mode 'x' fails if the file already exists; used only to create it.
        fp = open('server/voters.json', 'x')
        fp.close()
    except:
        # file already exists, which is what we want
        # NOTE(review): bare except also swallows e.g. PermissionError — consider
        # narrowing to FileExistsError.
        pass

    with open('server/voters.json', 'r+') as fp:
        total_votes = {}
        fs = fp.read()
        if fs:
            total_votes = json.loads(fs)

        # Rank contributors by total commits, most active first.
        top_contributors = sorted(gh.repos.get_contributors(api, settings.URN),
                                  key=lambda user: user["total"], reverse=True)
        top_contributors = [
            item["author"]["login"].lower() for item in top_contributors
        ]
        contributors = set(
            top_contributors)  # store it while it's still a complete list
        top_contributors = top_contributors[:settings.MERITOCRACY_TOP_CONTRIBUTORS]
        top_contributors = set(top_contributors)

        # Rank voters by historical vote count, excluding blacklisted users.
        top_voters = sorted(total_votes, key=total_votes.get, reverse=True)
        top_voters = map(lambda user: user.lower(), top_voters)
        top_voters = list(
            filter(
                lambda user: user not in settings.MERITOCRACY_VOTERS_BLACKLIST,
                top_voters))
        top_voters = set(top_voters[:settings.MERITOCRACY_TOP_VOTERS])

        # The meritocracy is the union of both ranked groups.
        meritocracy = top_voters | top_contributors
        __log.info("generated meritocracy: " + str(meritocracy))
        with open('server/meritocracy.json', 'w') as mfp:
            json.dump(list(meritocracy), mfp)

        needs_update = False
        for pr in prs:
            pr_num = pr["number"]
            __log.info("processing PR #%d", pr_num)

            # gather all current votes
            votes, meritocracy_satisfied = gh.voting.get_votes(
                api, settings.URN, pr, meritocracy)

            # is our PR approved or rejected?
            vote_total, variance = gh.voting.get_vote_sum(
                api, votes, contributors)
            threshold = gh.voting.get_approval_threshold(api, settings.URN)
            is_approved = vote_total >= threshold and meritocracy_satisfied

            seconds_since_updated = gh.prs.seconds_since_updated(api, pr)

            voting_window = base_voting_window
            # the PR is mitigated or the threshold is not reached ?
            if variance >= threshold or not is_approved:
                voting_window = gh.voting.get_extended_voting_window(
                    api, settings.URN)

            # Past half the threshold and already older than the base window,
            # but the meritocracy has not weighed in yet: ping them once.
            if (settings.IN_PRODUCTION and vote_total >= threshold / 2 and
                    seconds_since_updated > base_voting_window and
                    not meritocracy_satisfied):
                # check if we need to mention the meritocracy
                try:
                    commit = pr["head"]["sha"]
                    # get_or_create dedupes the mention per commit hash.
                    mm, created = MeritocracyMentioned.get_or_create(
                        commit_hash=commit)
                    if created:
                        # Don't ping the PR author or the bot itself.
                        meritocracy_mentions = meritocracy - {
                            pr["user"]["login"].lower(), "chaosbot"
                        }
                        gh.comments.leave_meritocracy_comment(
                            api, settings.URN, pr["number"],
                            meritocracy_mentions)
                except:
                    # NOTE(review): bare except — narrows to nothing; at least it logs.
                    __log.exception("Failed to process meritocracy mention")

            # is our PR in voting window?
            in_window = seconds_since_updated > voting_window

            if is_approved:
                __log.info("PR %d status: will be approved", pr_num)
                gh.prs.post_accepted_status(
                    api, settings.URN, pr, seconds_since_updated,
                    voting_window, votes, vote_total, threshold,
                    meritocracy_satisfied)

                if in_window:
                    __log.info("PR %d approved for merging!", pr_num)
                    try:
                        sha = gh.prs.merge_pr(api, settings.URN, pr, votes,
                                              vote_total, threshold,
                                              meritocracy_satisfied)
                        message_twitter = datetime.datetime.ctime(
                            datetime.datetime.now()) +\
                            " - PR {pr_num} approved for merging".format(pr_num=pr_num)
                        tw.PostTwitter(message_twitter, api_twitter)
                    # some error, like suddenly there's a merge conflict, or some
                    # new commits were introduced between finding this ready pr and
                    # merging it
                    # Make a tweet
                    except gh.exceptions.CouldntMerge:
                        __log.info(
                            "couldn't merge PR %d for some reason, skipping",
                            pr_num)
                        gh.issues.label_issue(api, settings.URN, pr_num,
                                              ["can't merge"])
                        # NOTE(review): the backslash inside this string literal
                        # continues it across the newline, so whatever whitespace
                        # precedes "skipping" in the source is embedded in the tweet.
                        message_twitter = datetime.datetime.ctime(
                            datetime.datetime.now()) +\
                            "Couldn't merge PR {pr_num} for some reason, \
skipping" .format(pr_num=pr_num)
                        tw.PostTwitter(message_twitter, api_twitter)
                        continue

                    gh.comments.leave_accept_comment(
                        api, settings.URN, pr_num, sha, votes, vote_total,
                        threshold, meritocracy_satisfied)
                    gh.issues.label_issue(api, settings.URN, pr_num,
                                          ["accepted"])
                    # chaosbot rewards merge owners with a follow
                    pr_owner = pr["user"]["login"]
                    gh.users.follow_user(api, pr_owner)
                    needs_update = True
            else:
                __log.info("PR %d status: will be rejected", pr_num)

                if in_window:
                    gh.prs.post_rejected_status(
                        api, settings.URN, pr, seconds_since_updated,
                        voting_window, votes, vote_total, threshold,
                        meritocracy_satisfied)
                    __log.info("PR %d rejected, closing", pr_num)
                    gh.comments.leave_reject_comment(
                        api, settings.URN, pr_num, votes, vote_total,
                        threshold, meritocracy_satisfied)
                    gh.issues.label_issue(api, settings.URN, pr_num,
                                          ["rejected"])
                    gh.prs.close_pr(api, settings.URN, pr)
                elif vote_total < 0:
                    gh.prs.post_rejected_status(
                        api, settings.URN, pr, seconds_since_updated,
                        voting_window, votes, vote_total, threshold,
                        meritocracy_satisfied)
                else:
                    gh.prs.post_pending_status(
                        api, settings.URN, pr, seconds_since_updated,
                        voting_window, votes, vote_total, threshold,
                        meritocracy_satisfied)

            # Credit every voter on this PR in the persistent tally.
            for user in votes:
                if user in total_votes:
                    total_votes[user] += 1
                else:
                    total_votes[user] = 1

        if fs:
            # prepare for overwriting
            fp.seek(0)
            fp.truncate()
        json.dump(total_votes, fp)
        # flush all buffers because we might restart, which could cause a crash
        os.fsync(fp)

    # we approved a PR, restart
    if needs_update:
        __log.info("updating code and requirements and restarting self")
        startup_path = join(THIS_DIR, "..", "startup.sh")
        # before we exec, we need to flush i/o buffers so we don't lose logs or voters
        sys.stdout.flush()
        sys.stderr.flush()
        # execl replaces this process; nothing below runs when it fires.
        os.execl(startup_path, startup_path)

    __log.info("Waiting %d seconds until next scheduled PR polling event",
               settings.PULL_REQUEST_POLLING_INTERVAL_SECONDS)
def main():
    """Check each watched item's Amazon price and tweet changes, rate-limited.

    For every (name, url) pair in ITEM, the previously tweeted state is
    recovered from the account's own timeline (tweets are formatted as four
    lines: time / price / name / url); a new tweet is posted only when the
    state changed or the last tweet is over an hour old.
    """
    twitter = Twitter()
    tl = twitter.get_my_tl(count=10)
    interval = timedelta(hours=1)
    for item in ITEM:
        name, url = item
        # State recovered from the most recent matching tweet, if any.
        pre_time = None
        pre_price = None
        for tweet in tl:
            text = tweet['text'].split('\n')
            # Our own tweets have exactly 4 lines with the item name third.
            if len(text) == 4 and text[2] == name:
                try:
                    pre_time = datetime.strptime(text[0], '%Y-%m-%d %H:%M:%S')
                except ValueError:
                    # Malformed timestamp: discard and keep scanning older tweets.
                    pre_time = None
                    continue
                pre_price = text[1]
                break
        # Scrape the current price from the product page.
        r = requests.get(url)
        soup = BeautifulSoup(r.text, 'html.parser')
        our_price = soup.find(id='priceblock_ourprice')
        time = datetime.now()
        str_time = str(time).split('.')[0]
        if our_price is None:
            # No price element means the item is sold out. Tweet only if this
            # is news (no prior tweet, state changed, or last tweet is stale).
            template = str_time + '\nSOLD OUT\n' + name + '\n' + url
            if pre_time is None:
                twitter.post_tweet(template)
            elif pre_price != 'SOLD OUT':
                twitter.post_tweet(template)
            elif pre_time + interval < time:
                twitter.post_tweet(template)
            continue
        price = our_price.get_text()
        # Keep digits only (strips currency symbol and comma separators).
        int_price = int(re.sub(r'\D', '', price))
        template = str_time + '\n' + price + '\n' + name + '\n' + url
        if int_price > 35000:
            # Normal price range: tweet on first sighting, change, or staleness.
            if pre_time is None:
                twitter.post_tweet(template)
            elif pre_price != price:
                twitter.post_tweet(template)
            elif pre_time + interval < time:
                twitter.post_tweet(template)
        elif int_price > 20000:
            # Notable discount: always tweet, mentioning the owner.
            twitter.post_tweet('@tnktakuma\n' + template)
        else:
            # Implausibly low price — presumably a scrape error; flag it.
            twitter.post_tweet('ERROR\n' + template)
#!/usr/bin/env python import csv import logging import sys logger = logging.getLogger() handler = logging.StreamHandler() formatter = logging.Formatter( '%(asctime)s %(name)-12s %(levelname)-8s %(message)s') handler.setFormatter(formatter) logger.addHandler(handler) logger.setLevel(logging.DEBUG) sys.path.append('/Users/lorenamesa/Desktop/pyten/lib/python2.7/site-packages/') from twitter_api import Twitter twitter = Twitter() user_timeline = twitter.get_user_timeline(user_id=None, screen_name="cta") with open("/Users/lorenamesa/Desktop/pytennessee/cta_tweet_data.csv", "ab") as csvdata: if user_timeline: logging.info("Writing tweets for cta timeline data...") wr = csv.writer(csvdata, dialect='excel') for tweet in user_timeline: print tweet.__dict__.keys() # ['tweet_id', 'created_at', 'text'] wr.writerow(tweet.__dict__.values())