def index(): """ Input: No user input, but uses articles.csv and tweets.csv Output: Creates the page with the tweets and ny times articles that result from a given search """ if request.method == "POST": twitter.search(request.form['searchterm']) nyt.search(request.form['searchterm']) f = open('tweets.csv', 'r') tweets = f.read() f.close() tweets = tweets.decode('utf-8') g = open('articles.csv', 'r') articles = g.read() g.close() articles = articles.decode('utf-8') return render_template("index.html", twitter=tweets, nyt=articles) return render_template("index.html", twitter='No Search Has Been Done', nyt='No Search Has Been Done')
def search():
    """
    Handle the search page: on POST run the twitter/nyt searches (which
    write tweets.csv and articles.csv as side effects) and render the
    results; on GET render the placeholder text.
    """
    if request.method == "POST":
        twitter.search(request.form['searchterm'])
        nyt.search(request.form['searchterm'])
        # Read bytes and decode explicitly: the original decoded a text
        # read with str.decode('utf-8'), which breaks on Python 3.
        # Context managers ensure the files are closed on any exit path.
        with open('tweets.csv', 'rb') as f:
            tweets = f.read().decode('utf-8')
        with open('articles.csv', 'rb') as g:
            articles = g.read().decode('utf-8')
        return render_template("search.html", twitter=tweets, nyt=articles)
    return render_template("search.html", twitter='No Search Has Been Done',
                           nyt='No Search Has Been Done')
def perform_search():
    """Search mixed-type results for @RepTedLieu tweets mentioning
    Privacy or Apple, and print user, text, and a separator for each."""
    data = twitter.search(account, 'Privacy OR Apple from:RepTedLieu',
                          parameters={'result_type': 'mixed'})
    for status in data['statuses']:
        # NOTE(review): this print line arrived corrupted by a secret
        # scrubber ("'******'"); reconstructed as simple concatenation,
        # consistent with the sibling versions of this function.
        print('> user: ' + status['user']['screen_name'])
        print(status['text'])
        print("________________________>")
def test_search_data_decoded(self, urlopen):
    """Search results are JSON decoded, and only the tweet content is returned"""
    urlopen.return_value = self.twitter_results()
    decoded = search([])
    eq_(decoded, ['text'])
def twittersearch(bot, data):
    """Search twitter feeds for a term.

    Use ! to send the result to the channel, and @ to receive it as a
    personal message.
    """
    if data["message"][0] == "!":
        query = data["message"].replace("!twitter ", "")
        destination = "to"
    else:
        query = data["message"].replace("@twitter ", "")
        destination = "from"
    try:
        results = twitter.search(query)
    except Exception:
        # Narrowed from a bare 'except:', which would also have
        # swallowed KeyboardInterrupt and SystemExit.
        bot.send("err... something happened, that wasn't meant to",
                 channel=data[destination])
        return
    if (results == []):
        bot.send("No twitter search results for \"{}\"".format(query),
                 channel=data[destination])
        return
    bot.send("Twitter search results for \"{}\":".format(query),
             channel=data["to"])
    for result in results:
        bot.send("* {}: {}".format(result[0].encode('utf-8'),
                                   result[1].encode('utf-8')),
                 channel=data["to"])
def main_page():
    """Render the search form on GET; on POST run the tweet search,
    compute the average sentiment, and render the results page."""
    if request.method != 'POST':
        return render_template('search.html')

    hashtag = '#' + request.form['user_search']
    city = request.form['city'].title()
    radius = request.form['radius']
    tweets = search(hashtag, city, radius)
    if len(tweets) == 0:
        return render_template('no_results.html', city=city,
                               topic=request.form['user_search'])
    sentiments = [analyze(tweet) for tweet in tweets]
    # Average score and magnitude across all analyzed tweets.
    avg_score = sum(score for score, _ in sentiments) / len(sentiments)
    avg_magnitude = sum(mag for _, mag in sentiments) / len(sentiments)
    sentiment = overall_sentiment(avg_score, avg_magnitude)
    return render_template('results.html', city=city, sentiment=sentiment,
                           topic=request.form['user_search'], tweets=tweets)
def read_twitter(keyword):
    """Poll twitter for *keyword* and append rendered entries to the
    shared scrolling_images list until it is set to None.

    The sleep interval adapts to the average time span of buffered
    tweets, clamped to the range [3, 30] seconds.
    """
    latest_id = 1
    while scrolling_images is not None:
        try:
            entries = twitter.search(keyword, latest_id)
            entries.reverse()  # oldest first so they scroll in order
            for entry in entries:
                img = render_entry(entry)
                if img is not None:
                    # 'with lock' releases the lock even if append
                    # raises; the original acquire/release pair could
                    # leave the lock held forever on an exception.
                    with lock:
                        scrolling_images.append({"time": entry["time"],
                                                 "img": img})
                if latest_id < entry["id"]:
                    latest_id = entry["id"]
        except Exception as e:
            # 'except Exception, e' is Python-2-only syntax; 'as' works
            # on both Python 2.6+ and Python 3.
            print("Exception(%s)" % (e,))
        with lock:
            ts = get_average_time_span(scrolling_images)
        if ts is None:
            ts = 1
        time.sleep(min(30, max(3, ts)))
def index(request):
    # Fetch tweets for the requester's language; when fewer than 15 are
    # found, backfill from the "all" pool, then cap at 15.
    tweets = search(tags["all"], lang=request.LANG)
    if len(tweets) < 15:
        extra = search(tags["all"], "all")
        tweets.extend(extra)
    # we only want 15 tweets
    tweets = tweets[:15]
    # NOTE(review): every team is keyed by the literal 813, so this dict
    # collapses to a single entry and the persona loop below can only
    # ever attach a persona to persona_id 813. Presumably the key should
    # come from each entry in teams_config — confirm its schema before
    # fixing.
    teams = dict((813, t) for t in teams_config)
    for persona in Persona.objects.filter(persona_id__in=teams.keys()):
        teams[persona.persona_id]["persona"] = persona
    return jingo.render(
        request,
        "firefoxcup/index.html",
        {"tweets": tweets, "teams": teams.values(),
         "email_enabled": email_enabled}
    )
def index():
    """Serve the search page; on POST run the twitter and concept
    searches for the submitted query and render the results."""
    if request.method != "GET":
        query = request.form['search']
        twitter_results = twitter.search(query)
        tweets = twitter.get_embedded_tweets(twitter_results)
        results = concepts.getConcepts(query)
        return render_template("index.html", keyword_list=results,
                               query=query, tweets=tweets)
    return render_template("index.html")
def twitter_words():
    """Yield individual words from recent tweets matching each keyword
    in TWITTER_KEYWORDS.

    Mentions (@...) and links (http...) are skipped, a leading '#' is
    stripped from hashtags, and words that cannot be converted with
    str() are silently dropped.
    """
    for kw in TWITTER_KEYWORDS:
        for result in twitter.search(kw, n=1000):
            for word in re.split(r'\s+', result['text']):
                # re.split emits empty strings for leading/trailing
                # whitespace; the original then crashed on word[0]
                # with an IndexError.
                if not word:
                    continue
                if word[0] == '@' or word[:4] == 'http':
                    continue
                if word[0] == '#':
                    word = word[1:]
                try:
                    yield str(word)
                except UnicodeEncodeError:
                    pass
def search():
    """Aggregate a wiki summary, playlist link, tweets, and song
    recommendations for the ?q= query into a single HTML string.
    Returns "WRONG" when no query is supplied."""
    if 'q' not in request.args:
        return "WRONG"
    query = request.args['q']
    wiki = wiki1.search(query)
    playlist = "<a href=" + spotify.get_playlist(query) + ">here</a>"
    tweets = twitter.search(query)
    recs = spotify.get_recommendations(query)
    return ("<h2/>Summary:<br/></h2/>" + wiki
            + "<br/><h2/>For Playlist Click " + playlist
            + "</h2/><h2/>TWEETS:<br/></h2/>" + tweets
            + "<br/><h2/>RECOMENDED FOR YOU</h2/>"
            + "<h2/>" + recs + "</h2/>")
def index(): """ Input: No user input, but uses articles.csv and tweets.csv Output: Creates the page with the tweets and ny times articles that result from a given search """ if request.method=="POST": twitter.search(request.form['searchterm']) nyt.search(request.form['searchterm']) f = open('tweets.csv','r') tweets = f.read() f.close() tweets = tweets.decode('utf-8') g = open('articles.csv','r') articles = g.read() g.close() articles = articles.decode('utf-8') return render_template("index.html",twitter=tweets,nyt=articles) return render_template("index.html",twitter='No Search Has Been Done',nyt='No Search Has Been Done')
def continueFromFacebook(query, facebookID, name):
    """Collect recent Facebook activity for *facebookID*, then try to
    resolve *name* on twitter.

    Returns the twitter result annotated with follow-up actions when the
    match is ambiguous, or the combined result from continueFromTwitter
    when exactly one match is found.
    """
    fbResults = {
        "statuses": facebook.recentStatus(facebookID, query),
        "streams": facebook.recentStream(facebookID, query),
    }
    result = twitter.search(name)
    matches = result["numMatches"]
    if matches == 0:
        result["actions"] = ["DoAgain"]
    elif matches > 1:
        result["actions"] = ["Choose"]
    else:
        match = result["matches"][0]
        result = continueFromTwitter(query, fbResults,
                                     match["id"], match["name"])
    return result
def handler(event, context):
    """Lambda entry point: scan twitter for the configured tags and
    persist the results via rds.

    Returns an API-Gateway-style response dict on success. Exceptions
    propagate unchanged — the original wrapped the body in
    'except Exception as e: raise e', a no-op that only rewrote the
    traceback origin, so it has been removed.
    """
    logger.info("msg='Starting search for tags.'")
    start_time = time.time()
    tags = read_tags('TAGS')
    data = search(tags)
    rds.run(data)
    seconds = time.time() - start_time
    logger.info(f"msg='Job done' exec_time={seconds:.2f}")
    return {
        "isBase64Encoded": 'false',
        "statusCode": 200,
        "headers": {},
        "body": "Twitter Scan Done!"
    }
def news():
    # NOTE(review): this early return short-circuits the entire view —
    # everything below is dead code. Presumably a temporary disable;
    # confirm intent before deleting or re-enabling the body.
    return jsonify({})
    feed = []
    for tweet in g.user.twitters:
        feed.append(twitter.search(q='#' + tweet.handle))
    fb = []  # get_blob('https://graph.facebook.com/google/feed')
    reddits = {}
    threads = []
    # One background thread per (page, subreddit) pair; each thread
    # writes its results into reddits[page.name] via get_sub_reddit.
    for page in g.user.pages:
        reddits[page.name] = {}
        for sub in ['technology', 'news', 'worldnews']:
            thread = Thread(target=get_sub_reddit,
                            args=(reddits[page.name], sub, page.name))
            thread.start()
            threads.append(thread)
    for thread in threads:
        thread.join()
    # Flatten each page's per-subreddit result lists into one list.
    for page_name, subreddits in reddits.items():
        reddits[page_name] = reduce(add, subreddits.values(), [])
    #news=get_blob('https://api.usatoday.com/open/articles/topnews?search=google&api_key=asgn54b69rg7699v5skf8ur9')
    return jsonify(reddit=reddits, fb=fb, feed=feed)
def GET(self, name):
    """Run a twitter search for the ?search= query and return each hit
    with its predicted sentiment rating as pretty-printed JSON."""
    logging.debug(web.data())
    search = web.input(search="").search
    logging.debug('sentence: ' + search)
    start_time = current_milli_time()
    hits = twitterClient.search(search)
    result = {
        str(idx): {'text': twit, 'rate': sentiment.predict(twit)}
        for idx, twit in enumerate(hits)
    }
    res = {
        'status': 'ok',
        'took': current_milli_time() - start_time,
        'result': result,
    }
    web.header('Content-Type', 'application/json')
    return json.dumps(res, sort_keys=True, indent=4, separators=(',', ': '))
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 20 13:09:34 2015

@author: acbart

Fetches #corgis tweets and pretty-prints them.
"""
from pprint import pprint
import twitter

# SECURITY(review): real API credentials are hard-coded here; they are
# compromised by being committed and should be revoked and loaded from
# the environment (e.g. os.environ) instead.
# Fixed: a redundant argument-less twitter.connect() call that preceded
# this credentialed call has been removed.
twitter.connect('e0gtVH5Jr4cX9OtS9jNbijiUn',
                'NGIeGI4CiekJggGhzif05EPHnJnwbJnePsWKqpznImoFfKcY49',
                '342536207-dbt2eFduh9wiOprmKXsNTo7x5TM3vp7c6UeRaDxd',
                'JEZr4VKRFmOySkJoa11x5LNv8IgBA141KlNkzK0LoKoEX')

corgi_tweets = twitter.search("#corgis")

# NOTE(review): 'data' is computed but never used — possibly meant to be
# the argument to pprint below; confirm before removing.
data = [tweet['retweets'] for tweet in corgi_tweets]

pprint(corgi_tweets)
def twitterSearch(query):
    """Thin pass-through wrapper around twitter.search for *query*."""
    results = twitter.search(query)
    return results
def perform_search():
    """Fetch mixed-type results for @RepTedLieu tweets about Privacy or
    Apple and print each status through the module-level fmt template."""
    data = twitter.search(account, 'Privacy OR Apple from:RepTedLieu',
                          parameters={'result_type': 'mixed'})
    lines = [fmt.format(**status) for status in data['statuses']]
    print('\n'.join(lines))
def perform_search():
    """Search @RepTedLieu tweets mentioning Privacy or Apple and print
    one fmt-formatted line per returned status."""
    results = twitter.search(account, "Privacy OR Apple from:RepTedLieu",
                             parameters={"result_type": "mixed"})
    formatted = (fmt.format(**status) for status in results["statuses"])
    print("\n".join(formatted))
import twitter

# Bind the client to its own name: the original rebound the module name
# ('twitter = twitter.Twitter()'), shadowing the module and making it
# unreachable for the rest of the script.
client = twitter.Twitter()

tweet_list = client.search("$TWTR")
for tweet in tweet_list:
    print('@%s on %s tweeted: %s' % (tweet['user']['screen_name'],
                                     tweet['created_at'],
                                     tweet['text']))
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""CGI endpoint: search tweets for the ?q= parameter, store them with a
sentiment analysis, and return the matching stored documents as JSON."""
import time
from time import strftime
import cgitb,cgi
import db
import json
import twitter,watson

cgitb.enable()

print("Content-Type: application/json;charset=utf-8")
print()

raw_query = cgi.FieldStorage().getvalue('q')
if raw_query is None:
    # BUG FIX: the original called .upper() before this None check, so a
    # missing ?q= crashed with AttributeError instead of reaching the
    # error response below.
    print("{'error':'Empty query'}")
else:
    query_string = raw_query.upper()  # case insensitive
    tweets = twitter.search(query_string)
    db.store(query_string, time.time(), tweets, watson.analyze(tweets))
    ret = db.search(query_string)
    result = [{"id": doc["_id"], "time": doc["time"]} for doc in ret]
    print(json.dumps(result))
def test(self):
    """Searching for 'python' should yield more than one result."""
    results = twitter.search('python')
    print(results)
    self.assertGreater(len(results), 1)
from twitter import search, tweet

# Post a status, then echo every search hit for the bot's mentions.
tweet('cool')
for hit in search('@lelBotz'):
    print(hit)