def dialogue():
    """Handle one chat turn from an HTTP request.

    Reads the ``text`` query parameter, expands euphemisms, generates a reply
    via :class:`Reply`, persists a rolling per-user history (tweets/replies)
    in Redis for TWO_WEEK seconds, and returns the cleaned reply text.
    """
    def explicit_fword(text):
        # Expand every euphemism listed in the module-level `fwords`
        # mapping to its explicit form before generating a reply.
        for (implicit, explicit) in fwords.items():
            text = text.replace(implicit, explicit)
        return text

    # NOTE(review): request.args.get('text') returns None when the parameter
    # is missing, and explicit_fword would then raise on .replace — confirm
    # callers always supply 'text'.
    _input = request.args.get('text')
    _input = explicit_fword(_input)
    uid = compute_id(request)
    db = redis.db('twitter')
    user_info = db.get('user:%s' % uid)
    if user_info:
        # Known user: decode stored JSON and record the new utterance.
        user_info = json.loads(user_info.decode('utf8'))
        user_info['tweets'].append(_input)
    else:
        # First contact: start a fresh history for this user id.
        user_info = {'replies': [], 'tweets': [_input]}
    # Always address the user generically ('貴殿'), overriding any stored name.
    user_info.update({'screen_name': '貴殿', 'name': '貴殿'})
    rep = Reply()
    response = rep.make_response(_input, user_info)
    # Cap both histories at roughly 20 entries, dropping the oldest first.
    # (Tweets were appended above, replies are appended below, so the
    # effective caps differ by one — presumably acceptable here.)
    if len(user_info['replies']) >= 20:
        user_info['replies'].pop(0)
    if len(user_info['tweets']) >= 20:
        user_info['tweets'].pop(0)
    user_info['replies'].append(response['text'])
    # NOTE(review): argument order (name, value, time) matches the legacy
    # redis-py `Redis.setex`; redis-py >= 3 expects (name, time, value) —
    # confirm which client lib `redis.db` wraps before upgrading.
    db.setex('user:%s' % uid, json.dumps(user_info), TWO_WEEK)
    # Strip emoticons/whitespace only from the outgoing text; the stored
    # history above keeps the raw reply.
    response['text'] = normalize.remove_emoticon(response['text'])
    response['text'] = response['text'].strip()
    return response['text']
def __init__(self, debug=False):
    """Wire up the responders, the Twitter client, and Redis-backed state.

    :param debug: stored on the instance; presumably forwarded to posting
        calls as a dry-run flag — confirm against the `run` method.
    """
    self.debug = debug
    self.tl_responder = TimeLineReply()
    self.reply_responder = Reply()
    self.twitter = Twitter()
    db = redis.db('twitter')
    # Seed the marker key on first launch so later reads never miss.
    if not db.exists('latest_tl_replied'):
        db.set('latest_tl_replied', '(;´Д`)')
    self.db = db
def run(self, job):
    """Dispatch a named bot job.

    Each branch imports its job module lazily so unrelated jobs do not pay
    each other's import cost. Most branches build a job object, run it via
    ``self.execute`` (with a job-specific time window), and post the result
    to Twitter; ``debug`` is threaded through to ``self.twitter.post``.

    :param job: job name; unknown names raise ValueError.
    """
    self.setup_logger(job)
    if job == 'wordmap':
        from job.wordmap import WordMap
        wm = WordMap()
        # WordMap yields both a text and an image to attach to the post.
        (text, image) = self.execute(wm.run, hour=1)
        self.twitter.post(text, image=image, debug=self.debug)
    elif job == 'food':
        from job.clause_extractor import FoodExtractor
        e = FoodExtractor()
        self.twitter.post(self.execute(e.run, 24), debug=self.debug)
    elif job == 'okazu':
        from job.clause_extractor import OkazuExtractor
        e = OkazuExtractor()
        self.twitter.post(self.execute(e.run, 24), debug=self.debug)
    elif job == 'url':
        from job.popular_url import PopularUrl
        pop_url = PopularUrl(debug=self.debug)
        # Post at most three popular-URL messages.
        for (i, message) in enumerate(self.execute(pop_url.run, 2), start=1):
            self.twitter.post(message, debug=self.debug)
            if i >= 3:
                break
    elif job == 'ome':
        from job.ome import Ome
        ome = Ome()
        for message in self.execute(ome.run, 20):
            self.twitter.post(message, debug=self.debug)
    elif job == 'summarize':
        from job.popular_post import PopularPost
        pp = PopularPost()
        result = self.execute(pp.run)
        self.twitter.post(result, debug=self.debug)
    elif job == 'markov':
        from job.markov import MarkovTweet
        mt = MarkovTweet()
        result = self.execute(mt.run, 60)
        self.twitter.post(result, debug=self.debug)
    elif job == 'twitter_respond':
        from job.twitter_respond import TwitterResponder
        crawler = TwitterResponder(debug=self.debug)
        crawler.run()
    elif job == 'elasticsearch_update':
        from job.elasticsearch_update import ElasticSearchUpdate
        updater = ElasticSearchUpdate()
        updater.run()
    elif job == 'haiku':
        from lib import file_io, misc
        # Pick a random haiku from the bundled data file and tag it.
        haiku_list = file_io.read('haiku.txt', data=True)
        haiku = misc.choice(haiku_list) + ' #くわ川柳'
        self.twitter.post(haiku, debug=self.debug)
    elif job == '575':
        from job.n575 import Senryu
        s = Senryu()
        result = self.execute(s.run)
        self.twitter.post(result, debug=self.debug)
    elif job == 'dialogue':
        # Interactive console REPL against the reply engine; loops until
        # the process is interrupted (input() raising EOFError/KeyboardInterrupt).
        from job.reply import Reply
        reply = Reply()
        # Dummy tweet skeleton; the huge id keeps it newer than any real tweet.
        tweet = {'id': 1 << 128, 'user': {'id': 0, 'name': 'まんこ', 'screen_name': 'manko'}, 'created_at': '2015-03-09', 'source': 'm'}
        while True:
            tweet['text'] = input()
            print(reply.respond(tweet))
    elif job == 'friends':
        from job.friends import TwitterFriendsUpdater
        tfu = TwitterFriendsUpdater()
        tfu.run()
    else:
        raise ValueError('"%s" is not implemented yet' % job)
class test_Reply(object):
    """Tests for the Reply responder (validation, normalization, user info)."""

    def __init__(self):
        self.rep = Reply()

    def test_is_valid_tweet(self):
        """Each rejection rule of is_valid_tweet fires independently."""
        TWEET_MOCK = {
            'id': 0,
            'user': {'screen_name': ''},
            'text': '',
            'source': ''
        }
        tweet = TWEET_MOCK.copy()
        with mock.patch('lib.api.Twitter.get_latest_replied_id') as m:
            m.return_value = 1
            # BUG FIX: the original wrote `assert f(tweet), (False == 'msg')`,
            # which asserts truthiness with a meaningless always-False message.
            # The labels show the negative cases were meant to expect False.
            assert not self.rep.is_valid_tweet(tweet), 'is old'
            tweet['id'] = 2
            assert self.rep.is_valid_tweet(tweet), 'OK'
            tweet['user']['screen_name'] = 'sw_words'
            assert not self.rep.is_valid_tweet(tweet), 'is NG screen name'
            tweet['user']['screen_name'] = ''
            tweet['text'] = 'レスしなくていい'
            assert not self.rep.is_valid_tweet(tweet), 'has NG word'
            tweet['text'] = ''
            tweet['source'] = 'paper.li'
            assert not self.rep.is_valid_tweet(tweet), 'is written by NG source'

    def test_normalize(self):
        """Mentions/URLs are stripped and the addressee becomes '貴殿'."""
        tweet = '@sw_words ぁ単語は糞だな http://omanko'
        assert self.rep.normalize(tweet) == '貴殿は糞だな'

    def test_replace_name(self):
        """%sn expands to screen_name, %name to the display name."""
        # BUG FIX: the template had a stray backslash ('<ENEMA>\%sn</ENEMA>');
        # the expected value '<ENEMA>akari</ENEMA>' shows none was intended.
        tweet = '<ENEMA>%sn</ENEMA>'
        userinfo = {'screen_name': 'akari', 'name': '神岸あかり'}
        actual = self.rep.replace_name(tweet, userinfo)
        assert actual == '<ENEMA>akari</ENEMA>'
        tweet = '<ENEMA>%name</ENEMA>'
        actual = self.rep.replace_name(tweet, userinfo)
        assert actual == '<ENEMA>神岸あかり</ENEMA>'

    def test_get_userinfo(self):
        """get_userinfo creates a fresh record or appends to a stored one."""
        db = redis.db('twitter')
        # Ensure a clean slate so the "new user" path is exercised.
        db.delete('user:0')
        tweet = {'id': 0,
                 'user': {'id': 0, 'name': 'まんこ', 'screen_name': 'manko'},
                 'text': 'おまんこ',
                 'created_at': '2015-03-09',
                 'source': 'm'}
        actual = self.rep.get_userinfo(tweet)
        desired = {'name': 'まんこ', 'screen_name': 'manko',
                   'tweets': ['おまんこ'], 'replies': []}
        assert actual == desired
        # Existing user: the incoming text is appended to stored tweets.
        userinfo = {'name': 'まんこ', 'screen_name': 'manko',
                    'tweets': ['おまんこ', 'まんこ'],
                    'replies': ['manko', 'omanko']}
        db.set('user:0', json.dumps(userinfo))
        actual = self.rep.get_userinfo(tweet)
        userinfo['tweets'] = ['おまんこ', 'まんこ', 'おまんこ']
        assert actual == userinfo

    @nottest
    def test_respond(self):
        pass

    @nottest
    def test_run(self):
        pass
def __init__(self):
    """Build the Reply instance this object delegates to."""
    self.rep = Reply()
def run(self, job):
    """Dispatch a named bot job.

    Each branch imports its job module lazily, runs it (usually through
    ``self.execute`` with a job-specific time window), and posts the result
    to Twitter with ``debug`` threaded through to ``self.twitter.post``.

    :param job: job name; unknown names raise ValueError.
    """
    self.setup_logger(job)
    if job == 'wordcount':
        from job.wordcount.wordcount import WordCount
        # Only upload to Flickr for real runs.
        up_flickr = not self.debug
        wc = WordCount(plot_wordmap=True, up_flickr=up_flickr)
        self.twitter.post(self.execute(wc.run, hour=1), debug=self.debug)
    elif job == 'food':
        from job.clause_extractor import FoodExtractor
        e = FoodExtractor()
        self.twitter.post(self.execute(e.run, 24), debug=self.debug)
    elif job == 'okazu':
        from job.clause_extractor import OkazuExtractor
        e = OkazuExtractor()
        self.twitter.post(self.execute(e.run, 24), debug=self.debug)
    elif job == 'url':
        from job.popular_url import PopularUrl
        pop_url = PopularUrl(debug=self.debug)
        # Post at most three popular-URL messages.
        for (i, message) in enumerate(self.execute(pop_url.run, 2), start=1):
            self.twitter.post(message, debug=self.debug)
            if i >= 3:
                break
    elif job == 'ome':
        from job.ome import Ome
        ome = Ome()
        for message in self.execute(ome.run, 20):
            self.twitter.post(message, debug=self.debug)
    elif job == 'summarize':
        from job.popular_post import PopularPost
        pp = PopularPost()
        result = self.execute(pp.run)
        self.twitter.post(result, debug=self.debug)
    elif job == 'markov':
        from job.markov import MarkovTweet
        mt = MarkovTweet()
        result = self.execute(mt.run, 60)
        self.twitter.post(result, debug=self.debug)
    elif job == 'twitter_respond':
        from job.twitter_respond import TwitterResponder
        crawler = TwitterResponder(debug=self.debug)
        crawler.run()
    elif job == 'elasticsearch_update':
        # CONSISTENCY FIX: the original imported `elasticsearch_update`
        # without the `job.` package prefix used by every sibling branch
        # (and by the identical branch elsewhere in this project).
        from job.elasticsearch_update import ElasticSearchUpdate
        updater = ElasticSearchUpdate()
        updater.run()
    elif job == 'cputemp':
        from job.cputemp import CpuTemperatureChecker
        temp_checker = CpuTemperatureChecker()
        message = temp_checker.run()
        # Only post when the checker produced a warning message.
        if message:
            self.twitter.post(message, debug=self.debug)
    elif job == 'dialogue':
        # Interactive console REPL against the reply engine; loops until
        # input() raises (EOF/KeyboardInterrupt).
        from job.reply import Reply
        reply = Reply()
        # Dummy tweet skeleton; the huge id keeps it newer than any real tweet.
        tweet = {'id': 1 << 128, 'user': {'id': 0, 'name': 'まんこ', 'screen_name': 'manko'}, 'created_at': '2015-03-09', 'source': 'm'}
        while True:
            tweet['text'] = input()
            print(reply.respond(tweet))
    else:
        raise ValueError('"%s" is not implemented yet' % job)