def load_json(self):
    """Load word-map rendering settings from the 'WordMap' section of atango.json."""
    conf = file_io.read('atango.json')['WordMap']
    self.font_path = conf['font']
    self.bgcolor = conf['bgcolor']
    # Sizes are stored as strings in the config; coerce to int for drawing.
    self.fig_size = int(conf['fig_size'])
    self.canvas_size = int(conf['canvas_size'])
    self.overlap_allowable_rate = conf['overlap_allowable_rate']
    self.min_font_size = conf['min_font_size']
def __init__(self):
    """Set up reply config, the Twitter API client, and the cached global context."""
    self.cfg = file_io.read('atango.json')['Reply']
    self.twitter = api.Twitter()
    self.db = redis.db('twitter')
    # Redis returns bytes, or None when the key has never been written.
    raw_context = self.db.get('global_context')
    self.global_context = (json.loads(raw_context.decode('utf8'))
                           if raw_context else [])
def give_valentine_present(*arg):
    """Pick a random valentine present, fetch a matching image, and build a reply.

    Returns:
        dict: {'text': reply text with a '%name' placeholder,
               'media[]': local path of the downloaded image}.
            Note: '/tmp/present' may be stale or absent if every download failed.
    """
    present_list = file_io.read('valentine.txt')
    present = misc.choice(present_list)
    search_result = google_image.search(present)
    if 'images' in search_result:
        for url in search_result['images']:
            # Only plain image URLs can be attached as Twitter media.
            if not url.endswith(('.jpg', '.gif', '.png')):
                continue
            try:
                web.download(url, '/tmp/present')
                break
            except Exception:
                # Dead links are expected; try the next candidate URL.
                # (Was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit.)
                continue
    present = normalize.normalize(present)
    return {'text': '%nameに' + present + 'をヽ(´ー`)ノ', 'media[]': '/tmp/present'}
def give_present(*arg):
    """Pick a random present sentence, fetch a matching image, and build a reply.

    Returns:
        dict: {'text': reply text with a '%name' placeholder,
               'media[]': local path of the downloaded image}.
            Note: '/tmp/present' may be stale or absent if every download failed.
    """
    present_list = file_io.read('present.txt')
    sentence = misc.choice(present_list)
    # Re-draw until the sentence is usable: skip tally posts ("集計"/"シュウケイ"),
    # require a "を" particle, reject "萌え" endings and too-short strings.
    while ('集計' in sentence or 'シュウケイ' in sentence or 'を' not in sentence
           or sentence.endswith('萌え') or len(sentence) < 3):
        sentence = misc.choice(present_list)
    present = normalize.remove_emoticon(sentence)
    # Drop the trailing "を" so the image query is just the gift phrase.
    present = present[:-1] if present.endswith('を') else present
    search_result = google_image.search(present)
    if 'images' in search_result:
        for url in search_result['images']:
            if url.endswith(('.jpg', '.gif', '.png')):
                try:
                    web.download(url, '/tmp/present')
                    break
                except Exception:
                    # Dead links are expected; try the next candidate URL.
                    # (Was a bare `except:`, which also swallowed
                    # KeyboardInterrupt/SystemExit.)
                    continue
    sentence = normalize.normalize(sentence)
    return {'text': u'%nameに' + sentence, 'media[]': '/tmp/present'}
def give_present(*arg):
    """Pick a random present sentence, fetch a matching image, and build a reply.

    Returns:
        dict: {'text': reply text with a '%name' placeholder,
               'media[]': local path of the downloaded image}.
            Note: '/tmp/present' may be stale or absent if every download failed.
    """
    present_list = file_io.read('present.txt', data=True)
    sentence = misc.choice(present_list)
    # Re-draw until the sentence is usable: skip tally posts ("集計"/"シュウケイ"),
    # require a "を" particle, reject "萌え" endings and too-short strings.
    while ('集計' in sentence or 'シュウケイ' in sentence or 'を' not in sentence
           or sentence.endswith('萌え') or len(sentence) < 3):
        sentence = misc.choice(present_list)
    present = normalize.remove_emoticon(sentence)
    # Strip exclamation marks (ASCII and full-width) and first-person prefixes
    # so the image search query is just the gift phrase.
    present = present.replace('!', '').replace('!', '')
    present = present.replace('漏れの', '').replace('俺の', '').replace('俺が', '')
    present = present[:-1] if present.endswith('を') else present
    search_result = google_image.search(present)
    if 'images' in search_result:
        for url in search_result['images']:
            if url.endswith(('.jpg', '.gif', '.png')):
                try:
                    web.download(url, '/tmp/present')
                    break
                except Exception:
                    # Dead links are expected; try the next candidate URL.
                    # (Was a bare `except:`, which also swallowed
                    # KeyboardInterrupt/SystemExit.)
                    continue
    sentence = normalize.normalize(sentence)
    return {'text': '%nameに' + sentence, 'media[]': '/tmp/present'}
def test_read():
    """file_io.read returns content matching every fixture in TEST_DATA."""
    for example_path, content in TEST_DATA.items():
        actual = file_io.read(example_path)
        # Plain assert keeps this consistent with the sibling test module and
        # drops the deprecated nose `assert_equals` helper.
        assert str(actual) == content
import redis
from lib import file_io

# Connection settings for each named Redis database (from atango.json).
setting = file_io.read('atango.json')['Redis']
# Cache of live connections, keyed by logical database name.
manager = {}


def db(name):
    """Return a cached Redis connection for the named logical database.

    Args:
        name: logical database name; must be a key in the 'Redis'
            section of atango.json.

    Returns:
        redis.Redis: a connection that answered PING when first created.
    """
    if name not in manager:
        info = setting[name]
        conn = redis.Redis(**info)
        conn.ping()  # fail fast if the server is unreachable
        manager[name] = conn
    return manager[name]
# -*- coding: utf-8 -*- import json from datetime import datetime from elasticsearch import Elasticsearch, helpers from lib import kuzuha, file_io from lib.db import redis from lib.logger import logger ONE_DAY = 60 * 60 * 24 elasticsearch_setting = file_io.read('atango.json')['elasticsearch'] class ElasticSearchUpdate(object): def __init__(self): self.es = Elasticsearch([elasticsearch_setting]) self.db = redis.db('log') self.actions = [] def find(self, post_id): record = self.db.get('misao:%s' % post_id) if record: record = record.decode('utf8') stored_body = json.loads(record) if 'dt' in stored_body: stored_body['dt'] = datetime.strptime(stored_body['dt'], '%Y-%m-%d-%H-%M-%S') return stored_body return {} def update(self, post_id, body, _op_type): self.actions.append({'_index': "misao", '_type': "log",
def test_read():
    """file_io.read round-trips every fixture registered in TEST_DATA."""
    for example_path, expected in TEST_DATA.items():
        loaded = file_io.read(example_path)
        assert str(loaded) == expected
def __init__(self):
    """Load the phrase-level and character-level shortening tables."""
    # Both tables ship as bundled data files (hence data=True).
    self.paraphrases = file_io.read('shorten.json', data=True)
    self.car_shorten_map = file_io.read('shorten_char.json', data=True)
def haiku(*arg):
    """Return one random senryu from haiku.txt with the contest hashtag appended."""
    candidates = file_io.read('haiku.txt', data=True)
    picked = misc.choice(candidates)
    return picked + ' #くわ川柳'
def __init__(self, plot_wordmap=True, up_flickr=False):
    """Initialize word-map state and load the NG-word filter list.

    Args:
        plot_wordmap: whether to render the word-map image.
        up_flickr: whether to upload the result to Flickr.
    """
    self.plot_wordmap = plot_wordmap
    self.up_flickr = up_flickr
    self.start_time = 0
    # Words that must never appear in the map.
    self.ng_words = set(file_io.read("ng_words.txt"))
    # Per-word statistics, created lazily on first access.
    self.all_words = defaultdict(Word)
import json
from flask import Blueprint, request, make_response
from lib import normalize, misc, file_io
from lib.db import redis
from job.reply import Reply
from . import compute_id

app = Blueprint('api', __name__)
# Mapping of implicit (euphemistic) words to their explicit forms.
fwords = file_io.read('fwords.json')
TWO_WEEK = 60*60*24*14  # seconds


@app.route("/api/dialogue/")
def dialogue():
    # NOTE(review): truncated in this view — the handler continues beyond
    # what is visible here.

    def explicit_fword(text):
        """Replace each implicit f-word in text with its explicit form."""
        for (implicit, explicit) in fwords.items():
            text = text.replace(implicit, explicit)
        return text

    _input = request.args.get('text')
    _input = explicit_fword(_input)
    uid = compute_id(request)
    db = redis.db('twitter')
    user_info = db.get('user:%s' % uid)
    if user_info:
        # Known user: append this utterance to their stored tweet history.
        user_info = json.loads(user_info.decode('utf8'))
        user_info['tweets'].append(_input)
    else:
        # First contact: start a fresh history for this uid.
        user_info = {'replies': [], 'tweets': [_input]}
    user_info.update({'screen_name': '貴殿', 'name': '貴殿'})
# -*- coding: utf-8 -*- from lib import misc, file_io, normalize, google_image, web RESPONSES = ( "ああ(;´Д`)", "マジで?(;´Д`)", "ワラタ(;´Д`)", "何言ってるんだこの人(;´Д`)", "ヤバイな(;´Д`)", "畏れ(;´Д`)", "わかってるよ(;´Д`)", "つーかまんこだろ" ) config = file_io.read('atango.json')['Reply'] def _random_choice(*arg): while True: yield misc.choice(RESPONSES) def respond_by_rule(*arg): if any(substr in arg[0] for substr in config['VALENTINE']): yield give_valentine_present() elif any(substr in arg[0] for substr in config['PRESENT']): yield give_present() elif any(substr in arg[0] for substr in config['HAIKU']): yield haiku() def present_at_event(*arg):
import json
from flask import Blueprint, request, make_response
from lib import normalize, misc, file_io
from lib.db import redis
from job.reply import Reply
from . import compute_id

app = Blueprint('api', __name__)
# Mapping of implicit (euphemistic) words to their explicit forms.
fwords = file_io.read('fwords.json')
TWO_WEEK = 60*60*24*14  # seconds


@app.route("/sw/api/dialogue/")
def dialogue():
    # NOTE(review): truncated in this view — the handler continues beyond
    # what is visible here.

    def explicit_fword(text):
        """Replace each implicit f-word in text with its explicit form."""
        for (implicit, explicit) in fwords.items():
            text = text.replace(implicit, explicit)
        return text

    _input = request.args.get('text')
    _input = explicit_fword(_input)
    uid = compute_id(request)
    db = redis.db('twitter')
    user_info = db.get('user:%s' % uid)
    if user_info:
        # Known user: append this utterance to their stored tweet history.
        user_info = json.loads(user_info.decode('utf8'))
        user_info['tweets'].append(_input)
    else:
        # First contact: start a fresh history for this uid.
        user_info = {'replies': [], 'tweets': [_input]}
    user_info.update({'screen_name': '貴殿', 'name': '貴殿'})
def haiku(*arg):
    """Return one random line from haiku.txt tagged with the senryu hashtag."""
    lines = file_io.read('haiku.txt')
    selected = misc.choice(lines)
    return selected + ' #くわ川柳'
def __init__(self):
    """Load the phrase-level and character-level shortening tables."""
    # The two loads are independent; order does not matter.
    self.car_shorten_map = file_io.read('shorten_char.json')
    self.paraphrases = file_io.read('shorten.json')
def run(self, job):
    """Dispatch and execute the named bot job, posting any result to Twitter.

    Job modules are imported lazily inside each branch so that one job's
    dependencies do not have to be installed to run another.

    Args:
        job: job name string.

    Raises:
        ValueError: if job does not match any implemented job name.
    """
    self.setup_logger(job)
    if job == 'wordmap':
        from job.wordmap import WordMap
        wm = WordMap()
        (text, image) = self.execute(wm.run, hour=1)
        self.twitter.post(text, image=image, debug=self.debug)
    elif job == 'food':
        from job.clause_extractor import FoodExtractor
        e = FoodExtractor()
        self.twitter.post(self.execute(e.run, 24), debug=self.debug)
    elif job == 'okazu':
        from job.clause_extractor import OkazuExtractor
        e = OkazuExtractor()
        self.twitter.post(self.execute(e.run, 24), debug=self.debug)
    elif job == 'url':
        from job.popular_url import PopularUrl
        pop_url = PopularUrl(debug=self.debug)
        # Post at most the top three popular-URL messages.
        for (i, message) in enumerate(self.execute(pop_url.run, 2), start=1):
            self.twitter.post(message, debug=self.debug)
            if i >= 3:
                break
    elif job == 'ome':
        from job.ome import Ome
        ome = Ome()
        for message in self.execute(ome.run, 20):
            self.twitter.post(message, debug=self.debug)
    elif job == 'summarize':
        from job.popular_post import PopularPost
        pp = PopularPost()
        result = self.execute(pp.run)
        self.twitter.post(result, debug=self.debug)
    elif job == 'markov':
        from job.markov import MarkovTweet
        mt = MarkovTweet()
        result = self.execute(mt.run, 60)
        self.twitter.post(result, debug=self.debug)
    elif job == 'twitter_respond':
        from job.twitter_respond import TwitterResponder
        crawler = TwitterResponder(debug=self.debug)
        crawler.run()
    elif job == 'elasticsearch_update':
        from job.elasticsearch_update import ElasticSearchUpdate
        updater = ElasticSearchUpdate()
        updater.run()
    elif job == 'haiku':
        from lib import file_io, misc
        haiku_list = file_io.read('haiku.txt', data=True)
        haiku = misc.choice(haiku_list) + ' #くわ川柳'
        self.twitter.post(haiku, debug=self.debug)
    elif job == '575':
        from job.n575 import Senryu
        s = Senryu()
        result = self.execute(s.run)
        self.twitter.post(result, debug=self.debug)
    elif job == 'dialogue':
        from job.reply import Reply
        reply = Reply()
        # Synthetic tweet skeleton used to drive the reply engine from stdin.
        tweet = {'id': 1 << 128,
                 'user': {'id': 0, 'name': 'まんこ', 'screen_name': 'manko'},
                 'created_at': '2015-03-09', 'source': 'm'}
        # Interactive REPL: read a line, print the bot's reply, repeat forever.
        while True:
            tweet['text'] = input()
            print(reply.respond(tweet))
    elif job == 'friends':
        from job.friends import TwitterFriendsUpdater
        tfu = TwitterFriendsUpdater()
        tfu.run()
    else:
        raise ValueError('"%s" is not implemented yet' % job)
from lib import api, web, kuzuha, file_io, google_image, normalize
from lib.regex import re_url, re_html_tag
from lib.logger import logger

HOUR_RANGE = 4
MAX_TWEET_LENGTH = 140
SHORT_URL_LENGTH = 24  # length Twitter's t.co wrapper assigns to a URL
HASH_TAG = ' #みさお人気URL'
DELIMITER = '/'
TWEET_FORMAT = '%s %s %d件' + DELIMITER
# Brackets/separators that typically delimit decorations in page titles.
re_title_delimiter = re.compile(u'[\[\((【|\)\])】]|( \- )|( \| )|( ― )|( : )')
re_no_shortened_title = re.compile(u'「.+(||( \- )).+」')
image_extensions = ('.jpg', '.png', '.gif', '.jpg:large', '.png:large')
ignore_extensions = ('.zip', '.rar', '.swf', '.pdf', '.mp3', '.mp4', '.wmv')
cfg = file_io.read('popular_url.json')
# NOTE(review): `re` and `Counter` are used here but their imports are not
# visible in this chunk — presumably imported above; verify in the full file.


class PopularUrl(object):
    """Collects and ranks URLs appearing in recent board posts."""

    def __init__(self, debug=False):
        self.twitter = api.Twitter()
        self.debug = debug

    def _count_url(self, posts):
        """Count URL occurrences over each post's 'text' and 'q1' fields.

        NOTE(review): truncated in this view — the loop body continues
        beyond what is visible here.
        """
        urls = Counter()
        for post in posts:
            for item in ('text', 'q1'):
                if not item in post:
                    continue
                # Strip HTML tags before extracting URLs.
                text = re_html_tag.sub('', post[item])
                # De-duplicate URLs within a single post.
                for url in set(re_url.findall(text)):
from lib import api, web, kuzuha, file_io, google_image, normalize
from lib.regex import re_url, re_html_tag
from lib.logger import logger

HOUR_RANGE = 4
MAX_TWEET_LENGTH = 140
SHORT_URL_LENGTH = 24  # length Twitter's t.co wrapper assigns to a URL
HASH_TAG = ' #みさお人気URL'
DELIMITER = '/'
TWEET_FORMAT = '%s %s %d件' + DELIMITER
# Brackets/separators that typically delimit decorations in page titles.
re_title_delimiter = re.compile(u'[\[\((【|\)\])】]|( \- )|( \| )|( ― )|( : )')
re_no_shortened_title = re.compile(u'「.+(||( \- )).+」')
image_extensions = ('.jpg', '.png', '.gif', '.jpg:large', '.png:large')
ignore_extensions = ('.zip', '.rar', '.swf', '.pdf', '.mp3', '.mp4', '.wmv')
cfg = file_io.read('popular_url.json')
# NOTE(review): `re` and `Counter` are used here but their imports are not
# visible in this chunk — presumably imported above; verify in the full file.


class PopularUrl(object):
    """Collects and ranks URLs appearing in recent board posts."""

    def __init__(self, debug=False):
        self.twitter = api.Twitter()
        self.debug = debug

    def _count_url(self, posts):
        """Count URL occurrences over each post's 'text' and 'q1' fields.

        NOTE(review): truncated in this view — the loop body continues
        beyond what is visible here.
        """
        urls = Counter()
        for post in posts:
            for item in ('text', 'q1'):
                if not item in post:
                    continue
                # Strip HTML tags before extracting URLs.
                text = re_html_tag.sub('', post[item])
from bs4 import BeautifulSoup
from lib import misc, file_io, normalize, google_image, web

# Canned fallback replies (Japanese board-style emoticon responses).
RESPONSES = (
    "ああ(;´Д`)",
    "マジで?(;´Д`)",
    "ワラタ(;´Д`)",
    "何言ってるんだこの人(;´Д`)",
    "ヤバイな(;´Д`)",
    "畏れ(;´Д`)",
    "わかってるよ(;´Д`)",
    "つーかまんこだろ"
)
config = file_io.read('atango.json')['Reply']
# External tool locations used by the valentine image pipeline.
PYTHON_EXE_PATH = '/work/venv/py36/bin/python'
STYLE_TRANSFER_PATH = '/work/atango/third_party/fast-style-transfer/'
# Safebooru list API for chocolate-tagged posts; %s is the page id.
SAFEBOORU_URL = 'https://safebooru.org/index.php?page=dapi&s=post&q=index&tags=chocolate&pid=%s'


def _random_choice(*arg):
    """Yield canned responses chosen at random, forever."""
    while True:
        yield misc.choice(RESPONSES)


def respond_by_rule(text, *arg):
    """Yield a rule-based reply when text contains a configured trigger word.

    NOTE(review): truncated in this view — the elif chain may continue
    beyond what is visible here.
    """
    if any(substr in text for substr in config['VALENTINE']):
        yield give_valentine_present(text, *arg)
    elif any(substr in text for substr in config['PRESENT']):
        yield give_present()