def give_present(*arg):
    """Pick a random 'present' sentence, fetch a matching image, and build a tweet payload.

    Returns a dict with a '%name'-templated text and a local media path.
    Note: if every candidate image download fails, '/tmp/present' may be a
    stale file from a previous call (original behavior, preserved).
    """
    present_list = file_io.read('present.txt')
    sentence = misc.choice(present_list)
    # Re-draw until usable: skip tally posts ('集計'/'シュウケイ'), require the
    # particle 'を', reject lines ending in '萌え', and drop very short lines.
    while ('集計' in sentence or 'シュウケイ' in sentence
           or 'を' not in sentence or sentence.endswith('萌え')
           or len(sentence) < 3):
        sentence = misc.choice(present_list)
    present = normalize.remove_emoticon(sentence)
    # Strip a trailing 'を' so the image query is just the noun phrase.
    if present.endswith('を'):
        present = present[:-1]
    search_result = google_image.search(present)
    if 'images' in search_result:
        for url in search_result['images']:
            if url.endswith(('.jpg', '.gif', '.png')):
                try:
                    web.download(url, '/tmp/present')
                    break
                # Was a bare `except:`, which also swallows SystemExit and
                # KeyboardInterrupt; narrow to Exception for best-effort retry.
                except Exception:
                    continue
    sentence = normalize.normalize(sentence)
    return {'text': u'%nameに' + sentence, 'media[]': '/tmp/present'}
def give_present(*arg):
    """Pick a random 'present' sentence, fetch a matching image, and build a tweet payload.

    Returns a dict with a '%name'-templated text and a local media path.
    Note: if every candidate image download fails, '/tmp/present' may be a
    stale file from a previous call (original behavior, preserved).
    """
    present_list = file_io.read('present.txt', data=True)
    sentence = misc.choice(present_list)
    # Re-draw until usable: skip tally posts ('集計'/'シュウケイ'), require the
    # particle 'を', reject lines ending in '萌え', and drop very short lines.
    while ('集計' in sentence or 'シュウケイ' in sentence
           or 'を' not in sentence or sentence.endswith('萌え')
           or len(sentence) < 3):
        sentence = misc.choice(present_list)
    present = normalize.remove_emoticon(sentence)
    # Remove exclamation marks (full-width and ASCII) and first-person
    # possessive phrases so the image query is a clean noun phrase.
    present = present.replace('!', '').replace('!', '')
    present = present.replace('漏れの', '').replace('俺の', '').replace('俺が', '')
    # Strip a trailing 'を' as well.
    if present.endswith('を'):
        present = present[:-1]
    search_result = google_image.search(present)
    if 'images' in search_result:
        for url in search_result['images']:
            if url.endswith(('.jpg', '.gif', '.png')):
                try:
                    web.download(url, '/tmp/present')
                    break
                # Was a bare `except:`, which also swallows SystemExit and
                # KeyboardInterrupt; narrow to Exception for best-effort retry.
                except Exception:
                    continue
    sentence = normalize.normalize(sentence)
    return {'text': '%nameに' + sentence, 'media[]': '/tmp/present'}
def give_valentine_present(*arg):
    """Pick a random Valentine present, fetch a matching image, and build a tweet payload.

    Returns a dict with a '%name'-templated text and a local media path.
    Note: if every candidate image download fails, '/tmp/present' may be a
    stale file from a previous call (original behavior, preserved).
    """
    present_list = file_io.read('valentine.txt')
    present = misc.choice(present_list)
    search_result = google_image.search(present)
    if 'images' in search_result:
        for url in search_result['images']:
            if not url.endswith(('.jpg', '.gif', '.png')):
                continue
            try:
                web.download(url, '/tmp/present')
                break
            # Was a bare `except:`, which also swallows SystemExit and
            # KeyboardInterrupt; narrow to Exception for best-effort retry.
            except Exception:
                continue
    present = normalize.normalize(present)
    return {'text': '%nameに' + present + 'をヽ(´ー`)ノ', 'media[]': '/tmp/present'}
def give_valentine_present(*arg):
    """Post a Valentine chocolate image; occasionally "chocolatize" the sender's icon instead.

    Returns a dict with a '%name'-templated text and a local media path.
    """
    # 9, 10 or 11 out of 0..11 — roughly a one-in-four chance: run the
    # sender's avatar through the style-transfer model instead.
    if random.randint(0, 11) > 8:
        icon_url = arg[1]['icon'].replace('_normal', '')
        filename = icon_url.split('/')[-1]
        web.download(icon_url, '/tmp/%s' % (filename))
        # Build the style-transfer command, writing the result in place.
        command = '%s evaluate.py --checkpoint ../../data/ckpt ' % (PYTHON_EXE_PATH)
        command += '--in-path /tmp/%s --out-path /tmp/%s' % (filename, filename)
        misc.command(command, shell=True, allow_err=True, cwd=STYLE_TRANSFER_PATH)
        return {'text': '%nameをチョコにしてやろうか!(゚Д゚)',
                'media[]': '/tmp/%s' % (filename)}
    # Otherwise grab a random chocolate picture from Safebooru.
    page_id = random.randint(0, 59)
    markup = web.open_url(SAFEBOORU_URL % page_id)
    parsed = BeautifulSoup(markup, 'lxml')
    chosen_post = misc.choice(parsed.find_all('post'))
    web.download('https:' + chosen_post['file_url'], '/tmp/present')
    # Tack on a random burst of exclamation marks for flavor.
    suffix = '!' * random.randint(0, 59)
    return {'text': '%nameにチョコをヽ(´ー`)ノ' + suffix, 'media[]': '/tmp/present'}
def generate(self, words, min_length):
    """Generate a sentence from a second-order Markov model built over *words*.

    Walks (prev, interest) state pairs until an EOS token lands while the
    sentence length is in (min_length, 140); may loop indefinitely if the
    model never produces an acceptable sentence.
    """
    (markov_dict, unique_words) = self.generate_markov_model(words)
    # Self-growing id table: first lookup of a word assigns it the next index.
    word_ids = defaultdict(lambda: len(word_ids))
    # Touch every unique word so ids match positions in unique_words.
    for x in unique_words:
        word_ids[x]
    sentence = ''
    # State is the last two tokens; start from the beginning-of-sentence pair.
    (prev, interest) = '', BOS
    word = ''
    while True:
        # Dead end in the chain: restart from the initial state.
        if not markov_dict[(prev, interest)]:
            (prev, interest) = '', BOS
        word = misc.choice(markov_dict[(prev, interest)])
        prev, interest = interest, word
        if word == EOS and min_length < len(sentence) < 140:
            # Trim a duplicated trailing '(;´Д`)' emoticon (6 chars) when it
            # appears exactly twice and ends the sentence.
            if sentence.count('(;´Д`)') == 2 and sentence[-6:] == '(;´Д`)':
                sentence = sentence[:-6]
            return sentence
        elif len(sentence) > 120:
            # Too long without a valid EOS: discard and start over.
            sentence = ''
            (prev, interest) = '', BOS
        elif word != BOS and word != EOS:
            # unique_words entries are comma-separated; the surface form is
            # the first field. word_ids maps the word back to its position.
            sentence += unique_words[word_ids[word]].split(',')[0]
def run(self, job):
    """Dispatch a named bot *job*: run its worker and post the result to Twitter.

    Each branch imports its worker lazily so one job's dependencies don't
    affect the others. Raises ValueError for an unknown job name.
    """
    self.setup_logger(job)
    if job == 'wordmap':
        from job.wordmap import WordMap
        wm = WordMap()
        (text, image) = self.execute(wm.run, hour=1)
        self.twitter.post(text, image=image, debug=self.debug)
    elif job == 'food':
        from job.clause_extractor import FoodExtractor
        e = FoodExtractor()
        self.twitter.post(self.execute(e.run, 24), debug=self.debug)
    elif job == 'okazu':
        from job.clause_extractor import OkazuExtractor
        e = OkazuExtractor()
        self.twitter.post(self.execute(e.run, 24), debug=self.debug)
    elif job == 'url':
        from job.popular_url import PopularUrl
        pop_url = PopularUrl(debug=self.debug)
        # Post at most 3 popular-URL messages.
        for (i, message) in enumerate(self.execute(pop_url.run, 2), start=1):
            self.twitter.post(message, debug=self.debug)
            if i >= 3:
                break
    elif job == 'ome':
        from job.ome import Ome
        ome = Ome()
        for message in self.execute(ome.run, 20):
            self.twitter.post(message, debug=self.debug)
    elif job == 'summarize':
        from job.popular_post import PopularPost
        pp = PopularPost()
        result = self.execute(pp.run)
        self.twitter.post(result, debug=self.debug)
    elif job == 'markov':
        from job.markov import MarkovTweet
        mt = MarkovTweet()
        result = self.execute(mt.run, 60)
        self.twitter.post(result, debug=self.debug)
    elif job == 'twitter_respond':
        from job.twitter_respond import TwitterResponder
        crawler = TwitterResponder(debug=self.debug)
        crawler.run()
    elif job == 'elasticsearch_update':
        from job.elasticsearch_update import ElasticSearchUpdate
        updater = ElasticSearchUpdate()
        updater.run()
    elif job == 'haiku':
        from lib import file_io, misc
        haiku_list = file_io.read('haiku.txt', data=True)
        haiku = misc.choice(haiku_list) + ' #くわ川柳'
        self.twitter.post(haiku, debug=self.debug)
    elif job == '575':
        from job.n575 import Senryu
        s = Senryu()
        result = self.execute(s.run)
        self.twitter.post(result, debug=self.debug)
    elif job == 'dialogue':
        # Interactive console loop with a synthetic tweet object; the huge id
        # and dummy user let the reply pipeline run outside Twitter.
        from job.reply import Reply
        reply = Reply()
        tweet = {'id': 1 << 128, 'user': {'id': 0, 'name': 'まんこ', 'screen_name': 'manko'}, 'created_at': '2015-03-09', 'source': 'm'}
        while True:
            tweet['text'] = input()
            print(reply.respond(tweet))
    elif job == 'friends':
        from job.friends import TwitterFriendsUpdater
        tfu = TwitterFriendsUpdater()
        tfu.run()
    else:
        raise ValueError('"%s" is not implemented yet' % job)
def haiku(*arg):
    """Return a random senryu from haiku.txt with the bot's hashtag appended."""
    poems = file_io.read('haiku.txt')
    chosen = misc.choice(poems)
    return chosen + ' #くわ川柳'
def _random_choice(*arg):
    """Endlessly yield random canned responses from RESPONSES."""
    # iter(callable, sentinel) calls the lambda on every step; the fresh
    # object() sentinel never compares equal, so the stream never ends.
    yield from iter(lambda: misc.choice(RESPONSES), object())
def test_choice():
    """choice over a homogeneous population can only return its sole element."""
    assert misc.choice('aaaa') == 'a'
def test_choice():
    """choice over a homogeneous population can only return its sole element."""
    got = misc.choice('aaaa')
    # Plain assert replaces nose's assert_equals: nose is unmaintained, and
    # pytest rewrites bare asserts to give equivalent failure diagnostics.
    assert got == 'a'
def haiku(*arg):
    """Return a random senryu from the bundled haiku.txt with the bot's hashtag appended."""
    chosen = misc.choice(file_io.read('haiku.txt', data=True))
    return chosen + ' #くわ川柳'