def analyze_image():
    # Assumes module-level names: a web framework `request` object (e.g. Flask's),
    # an HTTP client bound to `req` (e.g. `import requests as req`), the vision API
    # `endpoint` and `subscription_key`, and markovify's `NewlineText`.
    # Get image data from body
    file = request.files['file']
    # Post header
    headers = {
        'Content-Type': 'application/octet-stream',
        'Ocp-Apim-Subscription-Key': subscription_key
    }
    # Post params
    params = {'visualFeatures': 'Description', 'language': 'ja'}
    # Call API and get tags
    try:
        res = req.post(endpoint, headers=headers, params=params, data=file)
    except Exception as e:
        print(e)
        return 'Could not call the image analysis API'
    print(res.json())
    tags = res.json()['description']['tags']
    splited_text = open('./resources/splited.txt').read()
    text_model = NewlineText(splited_text)
    for tag in tags:
        try:
            # make_sentence_with_start raises if the tag cannot start a sentence
            # (or returns None, making .replace fail), so fall through to the next tag.
            sentence = text_model.make_sentence_with_start(
                tag, tries=300, max_overlap_ratio=0.7).replace(' ', '')
            return sentence
        except Exception:
            pass
    return 'Could not create sentence'
def train_models(users):
    users = dict(filter(lambda u: u[1][MESSAGES], users.items()))
    for user_id, user in users.items():
        model = NewlineText('\n'.join(user[MESSAGES]), retain_original=False)
        users[user_id][MODEL] = model
    return users
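# Usage sketch for train_models above. MESSAGES and MODEL are hypothetical
# dict-key constants (the originals are not shown); `users` maps a user id to
# that user's record. Users with an empty message list are filtered out.
from markovify import NewlineText  # used by train_models above

MESSAGES = 'messages'
MODEL = 'model'

users = {
    'U123': {MESSAGES: ['hello there', 'general kenobi'], MODEL: None},
    'U456': {MESSAGES: [], MODEL: None},  # dropped by train_models
}
trained = train_models(users)
# With such a tiny corpus this just reproduces one of the input lines.
print(trained['U123'][MODEL].make_sentence(tries=100))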
def get_model(bucket, team_id, user_id):
    key = f"models/{team_id}/{user_id}.json"
    print(f'downloading model {key}')
    model_json = BytesIO()
    bucket.download_fileobj(key, model_json)
    model = str(model_json.getbuffer(), 'utf-8')
    return NewlineText.from_chain(model)
def get_compressed_model(bucket, team_id, user_id):
    key = f"models/{team_id}/{user_id}.gz"
    print(f'downloading model {key}')
    model_gz = BytesIO()
    bucket.download_fileobj(key, model_gz)
    model = str(zlib.decompress(model_gz.getbuffer()), 'utf-8')
    return NewlineText.from_chain(model)
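# Usage sketch for the two download helpers above, assuming `bucket` is a boto3
# S3 Bucket resource; the bucket name and team/user ids below are hypothetical
# placeholders.
import zlib                        # used by get_compressed_model above
from io import BytesIO             # used by both helpers above
from markovify import NewlineText  # used by both helpers above
import boto3

bucket = boto3.resource('s3').Bucket('markov-models')  # hypothetical bucket name
model = get_compressed_model(bucket, 'T0000000', 'U0000000')
print(model.make_sentence(tries=100))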
async def create_model_for_user(self, user_id: int):
    results = self.history.find({'author': user_id}, {'msg': 1})
    f = StringIO()
    async for doc in results:
        f.write(doc['msg'])
        f.write('\n')
    model = NewlineText(f.getvalue(), well_formed=False)
    if model:
        await self.save_user_model(user_id, model)
    return model
def handle_one(body):
    if 'subtype' in body['event']:
        subtype = body['event']['subtype']
        print(f"ignoring event with subtype {subtype}")
        return
    print(body)
    team_id = body['team_id']
    user_id = body['event']['user']
    channel_id = body['event']['channel']
    text = body['event']['text']
    print(f'message from {user_id} in {channel_id}: {text}')
    old_model = get_model(bucket, team_id, user_id)
    new_model = NewlineText(text, retain_original=False)
    combined = markovify.combine([old_model, new_model])
    put_model(bucket, team_id, user_id, combined)
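# put_model is not shown above; this is a hypothetical counterpart to get_model,
# assuming the same boto3 bucket and key layout: serialise the combined chain to
# JSON and upload it so get_model's NewlineText.from_chain can read it back.
# (handle_one itself additionally needs `import markovify`, a module-level
# `bucket`, and get_model from the earlier snippet.)
from io import BytesIO

def put_model(bucket, team_id, user_id, model):
    key = f"models/{team_id}/{user_id}.json"
    print(f'uploading model {key}')
    body = BytesIO(model.chain.to_json().encode('utf-8'))
    bucket.upload_fileobj(body, key)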
async def fetch_user_model(self, user_id: int):
    result = await self.models.find_one({'_id': user_id}, {'msg': 1})
    if result:
        return NewlineText.from_json(zlib.decompress(result['msg']))
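# save_user_model (awaited in create_model_for_user above) is not shown; a
# hypothetical sketch that mirrors fetch_user_model: zlib-compress the model's
# JSON and upsert it into the same async Mongo collection under the 'msg' field.
import zlib                        # used by fetch_user_model above
from markovify import NewlineText  # used by fetch_user_model above

async def save_user_model(self, user_id: int, model):
    payload = zlib.compress(model.to_json().encode('utf-8'))
    await self.models.replace_one(
        {'_id': user_id},
        {'_id': user_id, 'msg': payload},
        upsert=True,
    )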
from discord.ext.commands import command
from .cog import Cog
from tswift import Song
from markovify import NewlineText

try:
    with open('all.txt', 'rt') as tmg:
        text = NewlineText(tmg.read(), state_size=2)
except FileNotFoundError:
    text = None


class Lyrics(Cog):
    @command(aliases=['lyrics', 'lyric'])
    async def get_lyrics(self, song_name):
        """only mountain goats songs, duh"""
        song = Song.find_song(song_name + " The mountain goats")
        if song:
            lyrics = f'```\n{song.title} - {song.artist}\n{song.lyrics}\n```'
            await self.bot.say(lyrics)
        else:
            await self.bot.reply("Sorry, couldn't find that song")

    @command(aliases=['sing'])
    async def make_lyric(self):
        """Generates Mountain Goats lyrics."""
        if text:
            await self.bot.say(text.make_sentence())
        else:
            await self.bot.say("No text file found for generation.")
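# Hypothetical wiring for the cog above, assuming the legacy discord.py 0.16-style
# commands API that provides `bot.say`, and that the custom Cog base class takes
# the bot instance in its constructor.
from discord.ext import commands

bot = commands.Bot(command_prefix='!')
bot.add_cog(Lyrics(bot))
bot.run('DISCORD_BOT_TOKEN')  # placeholder token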