def __init__(self, debug=False):
    """Wire up the timeline/reply responders, the Twitter client and
    the Redis-backed state used to throttle timeline replies.

    debug=True is propagated to posting calls as a dry-run flag.
    """
    self.debug = debug
    self.tl_responder = TimeLineReply()
    self.reply_responder = Reply()
    self.twitter = Twitter()
    self.db = redis.db('twitter')
    # Seed the "last user replied to on the timeline" marker with a
    # placeholder that can never equal a real screen name.
    if not self.db.exists('latest_tl_replied'):
        self.db.set('latest_tl_replied', '(;´Д`)')
class TwitterResponder(object):
    """Streams tweets and posts automatic replies.

    Direct mentions of @sw_words always get a reply; other timeline
    tweets are answered with probability RESPONDING_PROBABILITY, never
    twice in a row to the same user (tracked in Redis).
    """

    def __init__(self, debug=False):
        self.debug = debug
        self.tl_responder = TimeLineReply()
        self.reply_responder = Reply()
        self.twitter = Twitter()
        self.db = redis.db('twitter')
        # Placeholder that can never match a real screen name.
        if not self.db.exists('latest_tl_replied'):
            self.db.set('latest_tl_replied', '(;´Д`)')

    @staticmethod
    def is_duplicate_launch():
        """Return True when another `atango.py -j twitter_respond` runs."""
        proc_listing = misc.command('pgrep -fl python|grep "atango.py -j twitter_respond"', True)
        return bool(proc_listing[1].splitlines())

    def respond(self, instance, tweet, tl=False):
        """Ask *instance* for a response to *tweet* and post it.

        When tl=True the replied-to screen name is remembered so the
        same user is not replied to twice in a row.
        """
        generated = instance.respond(tweet)
        if not generated:
            return
        self.twitter.post(generated['text'], generated['id'],
                          generated.get('media[]'), debug=self.debug)
        if self.debug:
            return
        # NOTE(review): collapsed source made the nesting ambiguous; the
        # state writes below are read as debug-guarded — confirm.
        self.twitter.update_latest_replied_id(generated['id'])
        if tl:
            self.db.set('latest_tl_replied', generated['text'].split(' ')[0])

    def is_valid_tweet(self, text):
        """A tweet is respondable if it mentions us, or carries no
        mention/hashtag/retweet/link noise."""
        if text.startswith('@sw_words'):
            return True
        noisy = '@' in text or '#' in text or 'RT' in text or 'http' in text
        return not noisy

    def run(self):
        """Main loop: stream tweets, reply, and sweep mentions every
        TWO_MINUTES. Returns -1 if another instance is already running."""
        if self.is_duplicate_launch():
            logger.debug('TwitterResponder is already launched')
            return -1
        last_time = time.time()
        for tweet in self.twitter.stream_filter():
            if 'text' in tweet:
                body = tweet['text']
                if body.startswith('@sw_words'):
                    self.respond(self.reply_responder, tweet)
                else:
                    lucky = np.random.randint(100) < RESPONDING_PROBABILITY
                    if (lucky and self.is_valid_tweet(body)
                            and self.db.get('latest_tl_replied') != tweet['user']['screen_name']):
                        self.respond(self.tl_responder, tweet, tl=True)
            # Periodic mention sweep, oldest first.
            if time.time() - last_time > TWO_MINUTES:
                mentions = self.twitter.api.statuses.mentions_timeline(count=200)
                for mention in mentions[::-1]:
                    self.respond(self.reply_responder, mention)
                last_time = time.time()
class TwitterResponder(object):
    """Watches the user stream and posts automatic replies.

    Mentions of @sw_words are always answered; other timeline tweets
    are answered with probability RESPONDING_PROBABILITY, never twice
    in a row to the same user (tracked in Redis).
    """

    def __init__(self, debug=False):
        self.debug = debug
        self.tl_responder = TimeLineReply()
        self.reply_responder = Reply()
        self.twitter = Twitter()
        self.db = redis.db("twitter")
        # Placeholder that can never match a real screen name.
        if not self.db.exists("latest_tl_replied"):
            self.db.set("latest_tl_replied", "(;´Д`)")

    @staticmethod
    def is_duplicate_launch():
        """Return True when another `atango.py -j twitter_respond` runs."""
        proc_listing = misc.command('pgrep -fl python|grep "atango.py -j twitter_respond"', True)
        return bool(proc_listing[1].splitlines())

    def respond(self, instance, tweet, tl=False):
        """Ask *instance* for a response to *tweet* and post it.

        When tl=True the replied-to screen name is remembered so the
        same user is not replied to twice in a row.
        """
        generated = instance.respond(tweet)
        if not generated:
            return
        self.twitter.post(generated["text"], generated["id"],
                          generated.get("media[]"), debug=self.debug)
        if self.debug:
            return
        # NOTE(review): collapsed source made the nesting ambiguous; the
        # state writes below are read as debug-guarded — confirm.
        self.twitter.update_latest_replied_id(generated["id"])
        if tl:
            self.db.set("latest_tl_replied", generated["text"].split(" ")[0])

    def is_valid_tweet(self, text):
        """A tweet is respondable if it mentions us, or carries no
        mention/hashtag/retweet/link noise."""
        if text.startswith("@sw_words"):
            return True
        noisy = "@" in text or "#" in text or "RT" in text or "http" in text
        return not noisy

    def run(self):
        """Main loop: stream the user timeline, reply, and sweep mentions
        every TWO_MINUTES. Returns -1 if already running elsewhere."""
        if self.is_duplicate_launch():
            logger.debug("TwitterResponder is already launched")
            return -1
        last_time = time.time()
        for tweet in self.twitter.stream_api.user():
            if "text" in tweet:
                body = tweet["text"]
                if body.startswith("@sw_words"):
                    self.respond(self.reply_responder, tweet)
                else:
                    lucky = np.random.randint(100) < RESPONDING_PROBABILITY
                    if (lucky and self.is_valid_tweet(body)
                            and self.db.get("latest_tl_replied") != tweet["user"]["screen_name"]):
                        self.respond(self.tl_responder, tweet, tl=True)
            # Periodic mention sweep, oldest first.
            if time.time() - last_time > TWO_MINUTES:
                mentions = self.twitter.api.statuses.mentions_timeline(count=200)
                for mention in mentions[::-1]:
                    self.respond(self.reply_responder, mention)
                last_time = time.time()
def __init__(self, debug=False):
    """Wire up the timeline/reply responders, the Twitter client and
    the Redis-backed state used to throttle timeline replies.

    debug=True is propagated to posting calls as a dry-run flag.
    """
    self.debug = debug
    self.tl_responder = TimeLineReply()
    self.reply_responder = Reply()
    self.twitter = Twitter()
    self.db = redis.db("twitter")
    # Seed the "last user replied to on the timeline" marker with a
    # placeholder that can never equal a real screen name.
    if not self.db.exists("latest_tl_replied"):
        self.db.set("latest_tl_replied", "(;´Д`)")
def twitter_search(request):
    """Render Twitter search results for the ``query`` GET parameter.

    :param request: HTTP request; reads ``query`` from the query string.
    :returns: TemplateResponse rendering 'twitter/search.html' with the
        query and its result tweets (``tweets`` is None when no query).
    """
    query = request.GET.get('query', None)
    # BUG FIX: this was a Python 2 `print` statement, a SyntaxError under
    # Python 3 (the rest of this file uses print()/input()).
    print('Query: %s' % query)
    tweets = None
    if query:
        t = Twitter()
        result = t.search(query)
        tweets = result['results']
    data = {
        'title': 'My Title',
        'query': query,
        'tweets': tweets,
    }
    return TemplateResponse(request, 'twitter/search.html', data)
def __init__(self, verbose=False, debug=False):
    """Create the Twitter client and remember the output flags.

    verbose and debug are stored unchanged for later use by run().
    """
    self.verbose = verbose
    self.debug = debug
    self.twitter = Twitter()
class Atango(App):
    """Command-line dispatcher: runs exactly one named bot job per call."""

    def __init__(self, verbose=False, debug=False):
        # debug=True is forwarded to twitter.post as a dry-run flag.
        self.twitter = Twitter()
        self.verbose = verbose
        self.debug = debug

    def run(self, job):
        """Execute the job named *job*.

        Raises ValueError for unknown job names. Imports are kept inside
        each branch so only the selected job's dependencies are loaded.
        self.execute presumably wraps the callable with logging/retry —
        TODO confirm in App (defined outside this view).
        """
        self.setup_logger(job)
        if job == 'wordmap':
            from job.wordmap import WordMap
            wm = WordMap()
            (text, image) = self.execute(wm.run, hour=1)
            self.twitter.post(text, image=image, debug=self.debug)
        elif job == 'food':
            from job.clause_extractor import FoodExtractor
            e = FoodExtractor()
            self.twitter.post(self.execute(e.run, 24), debug=self.debug)
        elif job == 'okazu':
            from job.clause_extractor import OkazuExtractor
            e = OkazuExtractor()
            self.twitter.post(self.execute(e.run, 24), debug=self.debug)
        elif job == 'url':
            from job.popular_url import PopularUrl
            pop_url = PopularUrl(debug=self.debug)
            # Post at most 3 popular URLs.
            for (i, message) in enumerate(self.execute(pop_url.run, 2), start=1):
                self.twitter.post(message, debug=self.debug)
                if i >= 3:
                    break
        elif job == 'ome':
            from job.ome import Ome
            ome = Ome()
            for message in self.execute(ome.run, 20):
                self.twitter.post(message, debug=self.debug)
        elif job == 'summarize':
            from job.popular_post import PopularPost
            pp = PopularPost()
            result = self.execute(pp.run)
            self.twitter.post(result, debug=self.debug)
        elif job == 'markov':
            from job.markov import MarkovTweet
            mt = MarkovTweet()
            result = self.execute(mt.run, 60)
            self.twitter.post(result, debug=self.debug)
        elif job == 'twitter_respond':
            from job.twitter_respond import TwitterResponder
            crawler = TwitterResponder(debug=self.debug)
            crawler.run()
        elif job == 'elasticsearch_update':
            from job.elasticsearch_update import ElasticSearchUpdate
            updater = ElasticSearchUpdate()
            updater.run()
        elif job == 'haiku':
            from lib import file_io, misc
            # Pick a random haiku from the data file and tag it.
            haiku_list = file_io.read('haiku.txt', data=True)
            haiku = misc.choice(haiku_list) + ' #くわ川柳'
            self.twitter.post(haiku, debug=self.debug)
        elif job == '575':
            from job.n575 import Senryu
            s = Senryu()
            result = self.execute(s.run)
            self.twitter.post(result, debug=self.debug)
        elif job == 'dialogue':
            from job.reply import Reply
            # Interactive REPL: feed stdin lines to the reply engine using
            # a synthetic tweet (id beyond any real tweet id).
            reply = Reply()
            tweet = {'id': 1 << 128,
                     'user': {'id': 0, 'name': 'まんこ', 'screen_name': 'manko'},
                     'created_at': '2015-03-09', 'source': 'm'}
            while True:
                tweet['text'] = input()
                print(reply.respond(tweet))
        elif job == 'friends':
            from job.friends import TwitterFriendsUpdater
            tfu = TwitterFriendsUpdater()
            tfu.run()
        else:
            raise ValueError('"%s" is not implemented yet' % job)
class Atango(App):
    """Command-line dispatcher: runs exactly one named bot job per call."""

    def __init__(self, verbose=False, debug=False):
        # debug=True is forwarded to twitter.post as a dry-run flag.
        self.twitter = Twitter()
        self.verbose = verbose
        self.debug = debug

    def run(self, job):
        """Execute the job named *job*.

        Raises ValueError for unknown job names. Imports are kept inside
        each branch so only the selected job's dependencies are loaded.
        self.execute presumably wraps the callable with logging/retry —
        TODO confirm in App (defined outside this view).
        """
        self.setup_logger(job)
        if job == 'wordcount':
            from job.wordcount.wordcount import WordCount
            # Only upload to Flickr on real (non-debug) runs.
            up_flickr = not self.debug
            wc = WordCount(plot_wordmap=True, up_flickr=up_flickr)
            self.twitter.post(self.execute(wc.run, hour=1), debug=self.debug)
        elif job == 'food':
            from job.clause_extractor import FoodExtractor
            e = FoodExtractor()
            self.twitter.post(self.execute(e.run, 24), debug=self.debug)
        elif job == 'okazu':
            from job.clause_extractor import OkazuExtractor
            e = OkazuExtractor()
            self.twitter.post(self.execute(e.run, 24), debug=self.debug)
        elif job == 'url':
            from job.popular_url import PopularUrl
            pop_url = PopularUrl(debug=self.debug)
            # Post at most 3 popular URLs.
            for (i, message) in enumerate(self.execute(pop_url.run, 2), start=1):
                self.twitter.post(message, debug=self.debug)
                if i >= 3:
                    break
        elif job == 'ome':
            from job.ome import Ome
            ome = Ome()
            for message in self.execute(ome.run, 20):
                self.twitter.post(message, debug=self.debug)
        elif job == 'summarize':
            from job.popular_post import PopularPost
            pp = PopularPost()
            result = self.execute(pp.run)
            self.twitter.post(result, debug=self.debug)
        elif job == 'markov':
            from job.markov import MarkovTweet
            mt = MarkovTweet()
            result = self.execute(mt.run, 60)
            self.twitter.post(result, debug=self.debug)
        elif job == 'twitter_respond':
            from job.twitter_respond import TwitterResponder
            crawler = TwitterResponder(debug=self.debug)
            crawler.run()
        elif job == 'elasticsearch_update':
            # NOTE(review): imported from top level here, while other jobs
            # import from the job package — confirm the module path.
            from elasticsearch_update import ElasticSearchUpdate
            updater = ElasticSearchUpdate()
            updater.run()
        elif job == 'cputemp':
            from job.cputemp import CpuTemperatureChecker
            # Only tweet when the checker returns a warning message.
            temp_checker = CpuTemperatureChecker()
            message = temp_checker.run()
            if message:
                self.twitter.post(message, debug=self.debug)
        elif job == 'dialogue':
            from job.reply import Reply
            # Interactive REPL: feed stdin lines to the reply engine using
            # a synthetic tweet (id beyond any real tweet id).
            reply = Reply()
            tweet = {'id': 1 << 128,
                     'user': {'id': 0, 'name': 'まんこ', 'screen_name': 'manko'},
                     'created_at': '2015-03-09', 'source': 'm'}
            while True:
                tweet['text'] = input()
                print(reply.respond(tweet))
        else:
            raise ValueError('"%s" is not implemented yet' % job)
def __init__(self):
    """Initialize with a fresh Twitter API client."""
    self.twitter = Twitter()